diff --git a/.github/dependabot.yml b/.github/dependabot.yml index cd885540d..27b41fa8b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,3 +9,7 @@ updates: directory: "/" # Location of package manifests schedule: interval: "weekly" + groups: + all-dependencies: + patterns: + - "*" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 8e3029333..8ba9f2ca4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -15,7 +15,7 @@ on: push: branches: [ "dev", "master" ] pull_request: - branches: [ "dev", "master" ] + branches: [ "master" ] schedule: - cron: '22 22 * * 4' diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b389ebff2..fa1d690c8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,22 +7,11 @@ on: - master jobs: - test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-latest , macos-latest, windows-latest ] - go-version: [ '1.21', '1.22' ] + analyze: + runs-on: ubuntu-latest steps: - - name: Configure Windows - if: matrix.os == 'windows-latest' - run: git config --global core.autocrlf false # fixes go lint fmt error - name: Checkout uses: actions/checkout@v4 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go-version }} - name: Lint uses: golangci/golangci-lint-action@v3 with: @@ -36,6 +25,23 @@ jobs: autopilot bus bus/client worker worker/client + test: + needs: analyze + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ ubuntu-latest , macos-latest, windows-latest ] + go-version: [ '1.21', '1.22' ] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - name: Configure Windows + if: matrix.os == 'windows-latest' + run: git config --global core.autocrlf false # fixes go lint fmt error - name: Configure MySQL if: matrix.os == 'ubuntu-latest' uses: mirromutth/mysql-action@v1.1 diff --git 
a/README.md b/README.md index b95dab759..dc51edf86 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,257 @@ # [![Sia](https://sia.tech/assets/banners/sia-banner-expanded-renterd.png)](http://sia.tech) [![GoDoc](https://godoc.org/go.sia.tech/renterd?status.svg)](https://godoc.org/go.sia.tech/renterd) -# Renterd: The Next-Gen Sia Renter - -## Overview `renterd` is an advanced Sia renter engineered by the Sia Foundation. Designed to cater to both casual users seeking straightforward data storage and developers requiring a robust API for building apps on Sia. -`renterd` is now in beta, achieving feature parity with siad. It introduces an -enhanced web UI and the autopilot functionality. While it mirrors most siad -capabilities, renterd does not support backwards compatibility with siad +## Overview + +`renterd` is the successor to `siad`, offering feature parity while extending +its capabilities with new features like an enhanced web UI and an autopilot. +That said, `renterd` does not support backwards compatibility with siad metadata. Consequently, files uploaded via siad cannot currently be migrated to -renterd. Our immediate focus is on refining renterd to enhance its stability, +renterd. Our immediate focus is on refining `renterd` to enhance its stability, scalability, and performance, ensuring it serves as a robust foundation for new -Sia applications. +Sia applications. Useful links: + +- [API documentation](https://api.sia.tech/renterd) +- [Project Roadmap](https://github.com/orgs/SiaFoundation/projects/5) +- [Setup Guide](https://docs.sia.tech/renting/setting-up-renterd) + +## Database + +`renterd` requires a database to store its operational data. We support both +SQLite and MySQL, with SQLite set as the default due to its ease of setup. +SQLite is ideal for testing and development purposes, whereas MySQL is +recommended for production environments. 
+ +## Configuration + +`renterd` can be configured in various ways, through the use of a yaml file, CLI +flags or environment variables. Settings that are configured multiple times will +be evaluated in this order. In the CLI, use the `help` command to see an +overview of all settings configurable through the CLI. + +| **Name** | **Description** | **Default Value** | **CLI Flag** | **Environment Variable** | **YAML Path** | +|--------------------------------------|------------------------------------------------------|-----------------------------------|----------------------------------|------------------------------------------------|----------------------------------------| +| `HTTP.Address` | Address for serving the API | `:9980` | `--http` | - | `http.address` | +| `HTTP.Password` | Password for the HTTP server | - | - | `RENTERD_API_PASSWORD` | `http.password` | +| `Directory` | Directory for storing node state | `.` | `--dir` | - | `directory` | +| `Seed` | Seed for the node | - | - | `RENTERD_SEED` | `seed` | +| `AutoOpenWebUI` | Automatically open the web UI on startup | `true` | `--openui` | - | `autoOpenWebUI` | +| `ShutdownTimeout` | Timeout for node shutdown | `5m` | `--node.shutdownTimeout` | - | `shutdownTimeout` | +| `Log.Level` | Global logger level (debug\|info\|warn\|error). Defaults to 'info' | `info` | `--log.level` | `RENTERD_LOG_LEVEL` | `log.level` | +| `Log.File.Enabled` | Enables logging to disk. Defaults to 'true' | `true` | `--log.file.enabled` | `RENTERD_LOG_FILE_ENABLED` | `log.file.enabled` | +| `Log.File.Format` | Format of log file (json\|human). Defaults to 'json' | `json` | `--log.file.format` | `RENTERD_LOG_FILE_FORMAT` | `log.file.format` | +| `Log.File.Path` | Path of log file. Defaults to 'renterd.log' within the renterd directory | `renterd.log` | `--log.file.path` | `RENTERD_LOG_FILE_PATH` | `log.file.path` | +| `Log.StdOut.Enabled` | Enables logging to stdout. 
Defaults to 'true' | `true` | `--log.stdout.enabled` | `RENTERD_LOG_STDOUT_ENABLED` | `log.stdout.enabled` | +| `Log.StdOut.Format` | Format of log output (json\|human). Defaults to 'human' | `human` | `--log.stdout.format` | `RENTERD_LOG_STDOUT_FORMAT` | `log.stdout.format` | +| `Log.StdOut.EnableANSI` | Enables ANSI color codes in log output. Defaults to 'true' on non-Windows systems | `true` (`false` on Windows) | `--log.stdout.enableANSI` | `RENTERD_LOG_STDOUT_ENABLE_ANSI` | `log.stdout.enableANSI` | +| `Log.Database.Enabled` | Enable logging database queries. Defaults to 'true' | `true` | `--log.database.enabled` | `RENTERD_LOG_DATABASE_ENABLED` | `log.database.enabled` | +| `Log.Database.Level` | Logger level for database queries (info\|warn\|error). Defaults to 'warn' | `warn` | `--log.database.level` | `RENTERD_LOG_DATABASE_LEVEL`, `RENTERD_LOG_LEVEL` | `log.database.level` | +| `Log.Database.IgnoreRecordNotFoundError` | Enable ignoring 'not found' errors resulting from database queries. Defaults to 'true' | `true` | `--log.database.ignoreRecordNotFoundError` | `RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR` | `log.database.ignoreRecordNotFoundError` | +| `Log.Database.SlowThreshold` | Threshold for slow queries in logger. 
Defaults to 100ms | `100ms` | `--log.database.slowThreshold` | `RENTERD_LOG_DATABASE_SLOW_THRESHOLD` | `log.database.slowThreshold` | +| `Log.Database.Level (DEPRECATED)` | Logger level | `warn` | `--db.logger.logLevel` | `RENTERD_DB_LOGGER_LOG_LEVEL` | `log.database.level` | +| `Log.Database.IgnoreRecordNotFoundError (DEPRECATED)` | Ignores 'not found' errors in logger | `true` | `--db.logger.ignoreNotFoundError`| `RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR` | `log.ignoreRecordNotFoundError` | +| `Log.Database.SlowThreshold (DEPRECATED)` | Threshold for slow queries in logger | `100ms` | `--db.logger.slowThreshold` | `RENTERD_DB_LOGGER_SLOW_THRESHOLD` | `log.slowThreshold` | +| `Log.Path (DEPRECATED)` | Path to directory for logs | - | `--log-path` | `RENTERD_LOG_PATH` | `log.path` | +| `Database.MySQL.URI` | Database URI for the bus | - | `--db.uri` | `RENTERD_DB_URI` | `database.mysql.uri` | +| `Database.MySQL.User` | Database username for the bus | `renterd` | `--db.user` | `RENTERD_DB_USER` | `database.mysql.user` | +| `Database.MySQL.Password` | Database password for the bus | - | - | `RENTERD_DB_PASSWORD` | `database.mysql.password` | +| `Database.MySQL.Database` | Database name for the bus | `renterd` | `--db.name` | `RENTERD_DB_NAME` | `database.mysql.database` | +| `Database.MySQL.MetricsDatabase` | Database for metrics | `renterd_metrics` | `--db.metricsName` | `RENTERD_DB_METRICS_NAME` | `database.mysql.metricsDatabase` | +| `Database.SQLite.Database` | SQLite database name | - | - | - | `database.sqlite.database` | +| `Database.SQLite.MetricsDatabase` | SQLite metrics database name | - | - | - | `database.sqlite.metricsDatabase` | +| `Bus.AnnouncementMaxAgeHours` | Max age for announcements | `8760h` (1 year) | `--bus.announcementMaxAgeHours` | - | `bus.announcementMaxAgeHours` | +| `Bus.Bootstrap` | Bootstraps gateway and consensus modules | `true` | `--bus.bootstrap` | - | `bus.bootstrap` | +| `Bus.GatewayAddr` | Address for Sia peer connections | 
`:9981` | `--bus.gatewayAddr` | `RENTERD_BUS_GATEWAY_ADDR` | `bus.gatewayAddr` | +| `Bus.RemoteAddr` | Remote address for the bus | - | - | `RENTERD_BUS_REMOTE_ADDR` | `bus.remoteAddr` | +| `Bus.RemotePassword` | Remote password for the bus | - | - | `RENTERD_BUS_API_PASSWORD` | `bus.remotePassword` | +| `Bus.PersistInterval` | Interval for persisting consensus updates | `1m` | `--bus.persistInterval` | - | `bus.persistInterval` | +| `Bus.UsedUTXOExpiry` | Expiry for used UTXOs in transactions | `24h` | `--bus.usedUTXOExpiry` | - | `bus.usedUtxoExpiry` | +| `Bus.SlabBufferCompletionThreshold` | Threshold for slab buffer upload | `4096` | `--bus.slabBufferCompletionThreshold` | `RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD` | `bus.slabBufferCompletionThreshold` | +| `Worker.AllowPrivateIPs` | Allows hosts with private IPs | - | `--worker.allowPrivateIPs` | - | `worker.allowPrivateIPs` | +| `Worker.BusFlushInterval` | Interval for flushing data to bus | `5s` | `--worker.busFlushInterval` | - | `worker.busFlushInterval` | +| `Worker.ContractLockTimeout` | Timeout for locking contracts | `30s` | - | - | `worker.contractLockTimeout` | +| `Worker.DownloadMaxOverdrive` | Max overdrive workers for downloads | `5` | `--worker.downloadMaxOverdrive` | - | `worker.downloadMaxOverdrive` | +| `Worker.DownloadMaxMemory` | Max memory for downloads | `1GiB` | `--worker.downloadMaxMemory` | `RENTERD_WORKER_DOWNLOAD_MAX_MEMORY` | `worker.downloadMaxMemory` | +| `Worker.ID` | Unique ID for worker | `worker` | `--worker.id` | `RENTERD_WORKER_ID` | `worker.id` | +| `Worker.DownloadOverdriveTimeout` | Timeout for overdriving slab downloads | `3s` | `--worker.downloadOverdriveTimeout` | - | `worker.downloadOverdriveTimeout` | +| `Worker.UploadMaxMemory` | Max amount of RAM the worker allocates for slabs when uploading | `1GiB` | `--worker.uploadMaxMemory` | `RENTERD_WORKER_UPLOAD_MAX_MEMORY` | `worker.uploadMaxMemory` | +| `Worker.UploadMaxOverdrive` | Max overdrive workers for uploads | 
`5` | `--worker.uploadMaxOverdrive` | - | `worker.uploadMaxOverdrive` | +| `Worker.UploadOverdriveTimeout` | Timeout for overdriving slab uploads | `3s` | `--worker.uploadOverdriveTimeout` | - | `worker.uploadOverdriveTimeout` | +| `Worker.Enabled` | Enables/disables worker | `true` | `--worker.enabled` | `RENTERD_WORKER_ENABLED` | `worker.enabled` | +| `Worker.AllowUnauthenticatedDownloads` | Allows unauthenticated downloads | - | `--worker.unauthenticatedDownloads` | `RENTERD_WORKER_UNAUTHENTICATED_DOWNLOADS` | `worker.allowUnauthenticatedDownloads` | +| `Worker.ExternalAddress` | Address of the worker on the network, only necessary when the bus is remote | - | - | `RENTERD_WORKER_EXTERNAL_ADDR` | `worker.externalAddress` | +| `Worker.RemoteAddrs` | List of remote worker addresses (semicolon delimited) | - | - | `RENTERD_WORKER_REMOTE_ADDRS` | `worker.remotes` | +| `Worker.RemotePassword` | API password for the remote workers | - | - | `RENTERD_WORKER_API_PASSWORD` | `worker.remotes` | +| `Autopilot.Enabled` | Enables/disables autopilot | `true` | `--autopilot.enabled` | `RENTERD_AUTOPILOT_ENABLED` | `autopilot.enabled` | +| `Autopilot.AccountsRefillInterval` | Interval for refilling workers' account balances | `24h` | `--autopilot.accountRefillInterval` | - | `autopilot.accountsRefillInterval` | +| `Autopilot.Heartbeat` | Interval for autopilot loop execution | `30m` | `--autopilot.heartbeat` | - | `autopilot.heartbeat` | +| `Autopilot.MigrationHealthCutoff` | Threshold for migrating slabs based on health | `0.75` | `--autopilot.migrationHealthCutoff` | - | `autopilot.migrationHealthCutoff` | +| `Autopilot.RevisionBroadcastInterval`| Interval for broadcasting contract revisions | `168h` (7 days) | `--autopilot.revisionBroadcastInterval` | `RENTERD_AUTOPILOT_REVISION_BROADCAST_INTERVAL` | `autopilot.revisionBroadcastInterval` | +| `Autopilot.ScannerBatchSize` | Batch size for host scanning | `1000` | `--autopilot.scannerBatchSize` | - | 
`autopilot.scannerBatchSize` | +| `Autopilot.ScannerInterval` | Interval for scanning hosts | `24h` | `--autopilot.scannerInterval` | - | `autopilot.scannerInterval` | +| `Autopilot.ScannerNumThreads` | Number of threads for scanning hosts | `100` | - | - | `autopilot.scannerNumThreads` | +| `Autopilot.MigratorParallelSlabsPerWorker` | Parallel slab migrations per worker | `1` | `--autopilot.migratorParallelSlabsPerWorker` | `RENTERD_MIGRATOR_PARALLEL_SLABS_PER_WORKER` | `autopilot.migratorParallelSlabsPerWorker` | +| `S3.Address` | Address for serving S3 API | `:9982` | `--s3.address` | `RENTERD_S3_ADDRESS` | `s3.address` | +| `S3.DisableAuth` | Disables authentication for S3 API | `false` | `--s3.disableAuth` | `RENTERD_S3_DISABLE_AUTH` | `s3.disableAuth` | +| `S3.Enabled` | Enables/disables S3 API | `true` | `--s3.enabled` | `RENTERD_S3_ENABLED` | `s3.enabled` | +| `S3.HostBucketBases` | Enables bucket rewriting in the router for the provided bases | - | `--s3.hostBucketBases` | `RENTERD_S3_HOST_BUCKET_BASES` | `s3.hostBucketBases` | +| `S3.HostBucketEnabled` | Enables bucket rewriting in the router | - | `--s3.hostBucketEnabled` | `RENTERD_S3_HOST_BUCKET_ENABLED` | `s3.hostBucketEnabled` | +| `S3.KeypairsV4 (DEPRECATED)` | V4 keypairs for S3 | - | - | - | `s3.keypairsV4` | + +### Single-Node Setup + +A single-node setup involves running all components (bus, worker, and autopilot) +on the same machine. This is ideal for testing, development, or small-scale +deployments. This setup is the default when running `renterd` without any flags. + +### Cluster Setup + +In a cluster setup, the bus, worker, and autopilot run on separate nodes. This +setup is ideal for large-scale deployments where you want to horizontally scale +your renter. The worker nodes can be spread across multiple machines, and the +autopilot can be run on a separate machine. + +#### Bus Node Configuration + +The bus is the only node that exposes the UI. 
To run the bus separately, the +autopilot and worker have to be disabled using the `--autopilot.enabled` and +`--worker.enabled` flags. The only other requirement to run a bus is the (wallet) +seed. + +#### Worker Node Configuration + +To configure the worker as a standalone node, the autopilot has to be disabled +using the `--autopilot.enabled` flag, and the bus has to be disabled. There's no +flag to explicitly disable the `bus`, it's implied by configuring a remote +address for the bus using the `--bus.remoteAddr` and `--bus.remotePassword` +flags. When the bus is remote, the worker has to be configured with an external +address of the form `http://<ip>:<port>`, on localhost however this can be +the same as the worker's HTTP address. The worker needs to know its location on +the network because it relies on some webhooks it needs to register with the +bus, which in turn needs to know how to reach the worker when certain events +occur. Therefore it is important to start the worker after the bus is reachable. + +#### Autopilot Node Configuration + +To run the autopilot separately, the worker has to be disabled using the +`--worker.enabled` flag. Similar to the worker, the autopilot has to be +configured with a remote bus for the node not to start a bus itself. Along +with knowing where the bus is located, the autopilot also has to be aware of the +workers. These remote workers can be configured through yaml under the option +`worker.remotes`, or through environment variables +(`RENTERD_WORKER_REMOTE_ADDRS` and `RENTERD_WORKER_API_PASSWORD`). 
+ +#### Example docker-compose with minimal configuration + +```yaml +version: '3.9' + +services: + bus: + image: ghcr.io/siafoundation/renterd:master + container_name: renterd_bus + environment: + - RENTERD_SEED=<renterd seed> + - RENTERD_API_PASSWORD=bus-pass + ports: + - "9980:9980" + - "9981:9981" + + worker-1: + image: ghcr.io/siafoundation/renterd:master + container_name: renterd_worker-1 + environment: + - RENTERD_AUTOPILOT_ENABLED=false + - RENTERD_SEED=<renterd seed> + - RENTERD_API_PASSWORD=worker-pass + - RENTERD_BUS_API_PASSWORD=bus-pass + - RENTERD_BUS_REMOTE_ADDR=http://bus:9980/api/bus + - RENTERD_WORKER_EXTERNAL_ADDR=http://worker-1:9980/api/worker + ports: + - "9982:9980" + - "8082:8080" + depends_on: + - bus + + worker-2: + image: ghcr.io/siafoundation/renterd:master + container_name: renterd_worker-2 + environment: + - RENTERD_SEED=<renterd seed> + - RENTERD_API_PASSWORD=worker-pass + - RENTERD_BUS_API_PASSWORD=bus-pass + - RENTERD_BUS_REMOTE_ADDR=http://bus:9980/api/bus + - RENTERD_WORKER_EXTERNAL_ADDR=http://worker-2:9980/api/worker + ports: + - "9983:9980" + - "8083:8080" + depends_on: + - bus + + autopilot: + image: ghcr.io/siafoundation/renterd:master + container_name: renterd_autopilot + environment: + - RENTERD_API_PASSWORD=autopilot-pass + - RENTERD_BUS_API_PASSWORD=bus-pass + - RENTERD_BUS_REMOTE_ADDR=http://bus:9980/api/bus + - RENTERD_WORKER_API_PASSWORD=worker-pass + - RENTERD_WORKER_REMOTE_ADDRS=http://worker-1:9980/api/worker;http://worker-2:9980/api/worker + ports: + - "9984:9980" + depends_on: + - bus + - worker-1 + - worker-2 +``` + +## Tweaking Performance + +Depending on hardware specs, you can change the [configuration](#configuration) +to better utilize it and gain more performance out of `renterd`. This section +highlights some of the more obvious tweaks one can apply. + +### Increase/Decrease memory -## Useful Links +By default, `renterd` uses reasonable limits for RAM consumed by uploads and +downloads. 
Especially when downloading or uploading single large files, more RAM +can make a difference since it allows for processing the download/upload in +parallel. To change the max RAM `renterd` is going to use update the +`Worker.DownloadMaxMemory` and `Worker.UploadMaxMemory` settings. + +### Overdrive + +Both uploads and downloads have a setting we call "overdrive". Since `renterd` +operates in a trustless environment, we can't rely on all of our hosts being +reliable and of high quality. So when uploading `n` shards of some data to the +network (or downloading from it), the process is bottlenecked by the slowest +host. That is where the overdrive comes in. + +`Worker.UploadMaxOverdrive` and `Worker.DownloadMaxOverdrive` can be used to +configure how many additional hosts to the number we need to upload/download we +use to reduce the chance of getting hung up on a slow one. The default is `3` +which means up to 3 hosts can get stuck with the upload/download remaining +mostly unaffected. `Worker.UploadOverdriveTimeout` and +`Worker.DownloadOverdriveTimeout` specify the time that needs to pass before we +launch the overdrive uploads/downloads. + +Two conditions need to be met before the overdrive launches: +1. When uploading/downloading to/from `n` hosts (without overdrive), `n - overdriveHosts` pieces need to finish. +2. Once condition 1. is met, the configured overdrive timeout needs to pass + +What this means is that there is a tradeoff between using/paying for more +bandwidth and the ability to compensate for slow/stuck hosts. If you handpick +hosts you trust to be reliable, you can set the max overdrive to 0 for more max +efficiency while you can also increase the overdrive to 30 hosts after 100ms for +faster uploads at the cost of uploading more data than necessary and overpaying. +Regardless, we recommend that you perform your own benchmarking to see what +works best for your set of hosts, budget and use-case. 
-API documentation can be found [here](https://api.sia.tech/renterd).
-Setup guides are available on our [website](https://docs.sia.tech/renting/setting-up-renterd).
-A project roadmap is available on [GitHub](https://github.com/orgs/SiaFoundation/projects/5). ## Backups @@ -81,7 +311,7 @@ renter and shut it down again. Use the `mysqldump` command to create a backup of the MySQL databases. It's a utility provided by MySQL to backup or transfer a MySQL database and it's -usually installed alongside the MySQL cient tools. Replace placeholders with +usually installed alongside the MySQL client tools. Replace placeholders with actual user and password. ```bash @@ -228,9 +458,9 @@ docker run -d --name renterd-testnet -e RENTERD_API_PASSWORD="" -e REN ## Architecture -`renterd` distinguishes itself from its predecessor, `siad`, through a unique -architecture comprised of three main components: the autopilot, the bus, and one -or more workers. +`renterd` distinguishes itself from `siad` through a unique architecture +comprised of three main components: the autopilot, the bus, and one or more +workers. Instead of adopting another Electron app bundle, `renterd` incorporates an embedded web UI. This approach caters to developers and power users who prefer a @@ -263,15 +493,6 @@ data persistence. ## Usage -`renterd` can be configured in various ways, through the use of a yaml file, CLI -flags or environment variables. Settings that are configured multiple times will -be evaluated in this order. Use the `help` command to see an overview of all -settings. - -```sh -renterd --help -``` - The Web UI streamlines the initial setup and configuration for newcomers. However, if manual configuration is necessary, the subsequent sections outline a step-by-step guide to achieving a functional `renterd` instance. @@ -413,48 +634,3 @@ following entries exclude a decent amount of bad/old/malicious hosts: - 51.158.108.244 - siacentral.ddnsfree.com - siacentral.mooo.com - -## Debugging - -### Logging - -`renterd` has both console and file logging, the logs are stored in -`renterd.log` and contain logs from all of the components that are enabled, e.g. 
-if only the `bus` and `worker` are enabled it will only contain the logs from -those two components. - -### Ephemeral Account Drift - -The Autopilot manages a collection of ephemeral accounts, each corresponding to -a specific contract. These accounts facilitate quicker payments to hosts for -various actions, offering advantages over contract payments in terms of speed -and parallel execution. Account balances are periodically synchronized with -hosts, and discrepancies, if any, are detected during this process. renterd -incorporates built-in safeguards to deter host manipulation, discontinuing -interactions with hosts that exhibit excessive account balance drift. In rare -scenarios, issues may arise due to this drift; these can be rectified by -resetting the drift via a specific endpoint: - -- `POST /account/:id/resetdrift` - -### Contract Set Contracts - -The autopilot forms and manages contracts in the contract set with name -configured in the autopilot's configuration object, by default this is called -the `autopilot` contract set. This contract set should contain the amount of -contracts configured in the contracts section of the configuration. - -That means that, if everything is running smoothly, the following curl call -should return that number - -```bash -curl -u ":[YOUR_PASSWORD]" [BASE_URL]/api/bus/contracts/set/autopilot | jq '.|length' -``` - -### Autopilot Loop Trigger - -The autopilot allows triggering its loop using the following endpoint. The UI -triggers this endpoint after the user updates the configuration, but it can be -useful for debugging purposes too. 
- -- `POST /api/autopilot/trigger` diff --git a/api/bus.go b/api/bus.go index 248dfd313..8652a5f49 100644 --- a/api/bus.go +++ b/api/bus.go @@ -1,9 +1,16 @@ package api import ( + "errors" + "go.sia.tech/core/types" ) +var ( + ErrMarkerNotFound = errors.New("marker not found") + ErrMaxFundAmountExceeded = errors.New("renewal exceeds max fund amount") +) + type ( // ConsensusState holds the current blockheight and whether we are synced or not. ConsensusState struct { diff --git a/api/events.go b/api/events.go new file mode 100644 index 000000000..dbfa68a3f --- /dev/null +++ b/api/events.go @@ -0,0 +1,175 @@ +package api + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/webhooks" +) + +const ( + ModuleConsensus = "consensus" + ModuleContract = "contract" + ModuleContractSet = "contract_set" + ModuleSetting = "setting" + + EventUpdate = "update" + EventDelete = "delete" + EventArchive = "archive" + EventRenew = "renew" +) + +var ( + ErrUnknownEvent = errors.New("unknown event") +) + +type ( + EventConsensusUpdate struct { + ConsensusState + TransactionFee types.Currency `json:"transactionFee"` + Timestamp time.Time `json:"timestamp"` + } + + EventContractArchive struct { + ContractID types.FileContractID `json:"contractID"` + Reason string `json:"reason"` + Timestamp time.Time `json:"timestamp"` + } + + EventContractRenew struct { + Renewal ContractMetadata `json:"renewal"` + Timestamp time.Time `json:"timestamp"` + } + + EventContractSetUpdate struct { + Name string `json:"name"` + ContractIDs []types.FileContractID `json:"contractIDs"` + Timestamp time.Time `json:"timestamp"` + } + + EventSettingUpdate struct { + Key string `json:"key"` + Update interface{} `json:"update"` + Timestamp time.Time `json:"timestamp"` + } + + EventSettingDelete struct { + Key string `json:"key"` + Timestamp time.Time `json:"timestamp"` + } +) + +var ( + WebhookConsensusUpdate = func(url string, headers map[string]string) 
webhooks.Webhook { + return webhooks.Webhook{ + Event: EventUpdate, + Headers: headers, + Module: ModuleConsensus, + URL: url, + } + } + + WebhookContractArchive = func(url string, headers map[string]string) webhooks.Webhook { + return webhooks.Webhook{ + Event: EventArchive, + Headers: headers, + Module: ModuleContract, + URL: url, + } + } + + WebhookContractRenew = func(url string, headers map[string]string) webhooks.Webhook { + return webhooks.Webhook{ + Event: EventRenew, + Headers: headers, + Module: ModuleContract, + URL: url, + } + } + + WebhookContractSetUpdate = func(url string, headers map[string]string) webhooks.Webhook { + return webhooks.Webhook{ + Event: EventUpdate, + Headers: headers, + Module: ModuleContractSet, + URL: url, + } + } + + WebhookSettingUpdate = func(url string, headers map[string]string) webhooks.Webhook { + return webhooks.Webhook{ + Event: EventUpdate, + Headers: headers, + Module: ModuleSetting, + URL: url, + } + } + + WebhookSettingDelete = func(url string, headers map[string]string) webhooks.Webhook { + return webhooks.Webhook{ + Event: EventDelete, + Headers: headers, + Module: ModuleSetting, + URL: url, + } + } +) + +func ParseEventWebhook(event webhooks.Event) (interface{}, error) { + bytes, err := json.Marshal(event.Payload) + if err != nil { + return nil, err + } + switch event.Module { + case ModuleContract: + switch event.Event { + case EventArchive: + var e EventContractArchive + if err := json.Unmarshal(bytes, &e); err != nil { + return nil, err + } + return e, nil + case EventRenew: + var e EventContractRenew + if err := json.Unmarshal(bytes, &e); err != nil { + return nil, err + } + return e, nil + } + case ModuleContractSet: + if event.Event == EventUpdate { + var e EventContractSetUpdate + if err := json.Unmarshal(bytes, &e); err != nil { + return nil, err + } + return e, nil + } + case ModuleConsensus: + if event.Event == EventUpdate { + var e EventConsensusUpdate + if err := json.Unmarshal(bytes, &e); err != nil { 
+ return nil, err + } + return e, nil + } + case ModuleSetting: + switch event.Event { + case EventUpdate: + var e EventSettingUpdate + if err := json.Unmarshal(bytes, &e); err != nil { + return nil, err + } + return e, nil + case EventDelete: + var e EventSettingDelete + if err := json.Unmarshal(bytes, &e); err != nil { + return nil, err + } + return e, nil + } + } + return nil, fmt.Errorf("%w: module %s event %s", ErrUnknownEvent, event.Module, event.Event) +} diff --git a/api/host.go b/api/host.go index 4ad1f87a1..0dfe6da81 100644 --- a/api/host.go +++ b/api/host.go @@ -159,6 +159,7 @@ type ( Blocked bool `json:"blocked"` Checks map[string]HostCheck `json:"checks"` StoredData uint64 `json:"storedData"` + Subnets []string `json:"subnets"` } HostAddress struct { @@ -181,10 +182,11 @@ type ( HostScan struct { HostKey types.PublicKey `json:"hostKey"` + PriceTable rhpv3.HostPriceTable + Settings rhpv2.HostSettings + Subnets []string Success bool Timestamp time.Time - Settings rhpv2.HostSettings - PriceTable rhpv3.HostPriceTable } HostPriceTable struct { diff --git a/api/object.go b/api/object.go index 91332eec7..a5cef0422 100644 --- a/api/object.go +++ b/api/object.go @@ -228,9 +228,11 @@ type ( } ListObjectOptions struct { - Prefix string - Marker string - Limit int + Prefix string + Marker string + Limit int + SortBy string + SortDir string } SearchObjectOptions struct { diff --git a/api/setting.go b/api/setting.go index 923863e58..ff93550ef 100644 --- a/api/setting.go +++ b/api/setting.go @@ -12,6 +12,7 @@ import ( const ( SettingContractSet = "contractset" SettingGouging = "gouging" + SettingPricePinning = "pricepinning" SettingRedundancy = "redundancy" SettingS3Authentication = "s3authentication" SettingUploadPacking = "uploadpacking" @@ -80,6 +81,55 @@ type ( MigrationSurchargeMultiplier uint64 `json:"migrationSurchargeMultiplier"` } + // PricePinSettings holds the configuration for pinning certain settings to + // a specific currency (e.g., USD). 
It uses a Forex API to fetch the current + // exchange rate, allowing users to set prices in USD instead of SC. + PricePinSettings struct { + // Enabled can be used to either enable or temporarily disable price + // pinning. If enabled, both the currency and the Forex endpoint URL + // must be valid. + Enabled bool `json:"enabled"` + + // Currency is the external three-letter currency code. + Currency string `json:"currency"` + + // ForexEndpointURL is the endpoint that returns the exchange rate for + // Siacoin against the underlying currency. + ForexEndpointURL string `json:"forexEndpointURL"` + + // Threshold is a percentage between 0 and 1 that determines when the + // pinned settings are updated based on the exchange rate at the time. + Threshold float64 `json:"threshold"` + + // Autopilots contains the pinned settings for every autopilot. + Autopilots map[string]AutopilotPins `json:"autopilots,omitempty"` + + // GougingSettingsPins contains the pinned settings for the gouging + // settings. + GougingSettingsPins GougingSettingsPins `json:"gougingSettingsPins,omitempty"` + } + + // AutopilotPins contains the available autopilot settings that can be + // pinned. + AutopilotPins struct { + Allowance Pin `json:"allowance"` + } + + // GougingSettingsPins contains the available gouging settings that can be + // pinned. + GougingSettingsPins struct { + MaxDownload Pin `json:"maxDownload"` + MaxRPCPrice Pin `json:"maxRPCPrice"` + MaxStorage Pin `json:"maxStorage"` + MaxUpload Pin `json:"maxUpload"` + } + + // A Pin is a pinned price in an external currency. + Pin struct { + Pinned bool `json:"pinned"` + Value float64 `json:"value"` + } + // RedundancySettings contain settings that dictate an object's redundancy. RedundancySettings struct { MinShards int `json:"minShards"` @@ -98,6 +148,28 @@ type ( } ) +// IsPinned returns true if the pin is enabled and the value is greater than 0. 
+func (p Pin) IsPinned() bool { + return p.Pinned && p.Value > 0 +} + +// Validate returns an error if the price pin settings are not considered valid. +func (pps PricePinSettings) Validate() error { + if !pps.Enabled { + return nil + } + if pps.ForexEndpointURL == "" { + return fmt.Errorf("price pin settings must have a forex endpoint URL") + } + if pps.Currency == "" { + return fmt.Errorf("price pin settings must have a currency") + } + if pps.Threshold <= 0 || pps.Threshold >= 1 { + return fmt.Errorf("price pin settings must have a threshold between 0 and 1") + } + return nil +} + // Validate returns an error if the gouging settings are not considered valid. func (gs GougingSettings) Validate() error { if gs.HostBlockHeightLeeway < 3 { diff --git a/api/wallet.go b/api/wallet.go index 576a2da99..80da310d3 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -45,6 +45,7 @@ type ( ExpectedNewStorage uint64 `json:"expectedNewStorage"` HostAddress types.Address `json:"hostAddress"` PriceTable rhpv3.HostPriceTable `json:"priceTable"` + MaxFundAmount types.Currency `json:"maxFundAmount"` MinNewCollateral types.Currency `json:"minNewCollateral"` RenterAddress types.Address `json:"renterAddress"` RenterFunds types.Currency `json:"renterFunds"` @@ -55,6 +56,7 @@ type ( // WalletPrepareRenewResponse is the response type for the /wallet/prepare/renew // endpoint. 
WalletPrepareRenewResponse struct { + FundAmount types.Currency `json:"fundAmount"` ToSign []types.Hash256 `json:"toSign"` TransactionSet []types.Transaction `json:"transactionSet"` } diff --git a/api/webhooks.go b/api/webhooks.go index 379da7d55..0bca3d4ca 100644 --- a/api/webhooks.go +++ b/api/webhooks.go @@ -2,7 +2,7 @@ package api import "go.sia.tech/renterd/webhooks" -type WebHookResponse struct { +type WebhookResponse struct { Webhooks []webhooks.Webhook `json:"webhooks"` Queues []webhooks.WebhookQueueInfo `json:"queues"` } diff --git a/api/worker.go b/api/worker.go index 2908802f7..ae6024b84 100644 --- a/api/worker.go +++ b/api/worker.go @@ -27,6 +27,10 @@ var ( // be scanned since it is on a private network. ErrHostOnPrivateNetwork = errors.New("host is on a private network") + // ErrHostTooManyAddresses is returned by the worker API when a host has + // more than two addresses of the same type. + ErrHostTooManyAddresses = errors.New("host has more than two addresses, or two of the same type") + // ErrMultiRangeNotSupported is returned by the worker API when a request // tries to download multiple ranges at once. 
ErrMultiRangeNotSupported = errors.New("multipart ranges are not supported") @@ -133,6 +137,7 @@ type ( ExpectedNewStorage uint64 `json:"expectedNewStorage"` HostAddress types.Address `json:"hostAddress"` HostKey types.PublicKey `json:"hostKey"` + MaxFundAmount types.Currency `json:"maxFundAmount"` MinNewCollateral types.Currency `json:"minNewCollateral"` SiamuxAddr string `json:"siamuxAddr"` RenterAddress types.Address `json:"renterAddress"` @@ -146,6 +151,7 @@ type ( ContractID types.FileContractID `json:"contractID"` Contract rhpv2.ContractRevision `json:"contract"` ContractPrice types.Currency `json:"contractPrice"` + FundAmount types.Currency `json:"fundAmount"` TransactionSet []types.Transaction `json:"transactionSet"` } diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 51ed31a87..7f496798c 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -73,23 +73,18 @@ func newAccountRefillAlert(id rhpv3.Account, contract api.ContractMetadata, err } } -func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid types.FileContractID, err error) *alerts.Alert { - data := map[string]interface{}{"error": err.Error()} - if hk != (types.PublicKey{}) { - data["hostKey"] = hk.String() - } - if version != "" { - data["hostVersion"] = version - } - if fcid != (types.FileContractID{}) { - data["contractID"] = fcid.String() - } - - return &alerts.Alert{ - ID: alerts.IDForContract(alertPruningID, fcid), - Severity: alerts.SeverityWarning, - Message: "Contract pruning failed", - Data: data, +func newContractPruningFailedAlert(hk types.PublicKey, version, release string, fcid types.FileContractID, err error) alerts.Alert { + return alerts.Alert{ + ID: alerts.IDForContract(alertPruningID, fcid), + Severity: alerts.SeverityWarning, + Message: "Contract pruning failed", + Data: map[string]interface{}{ + "contractID": fcid.String(), + "error": err.Error(), + "hostKey": hk.String(), + "hostVersion": version, + "hostRelease": release, + }, Timestamp: 
time.Now(), } } diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 66b5792c2..263744ca5 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -117,6 +117,7 @@ type Autopilot struct { mu sync.Mutex pruning bool pruningLastStart time.Time + pruningAlertIDs map[types.FileContractID]types.Hash256 maintenanceTxnIDs []types.TransactionID } @@ -136,6 +137,8 @@ func New(id string, bus Bus, workers []Worker, logger *zap.Logger, heartbeat tim shutdownCtxCancel: shutdownCtxCancel, tickerDuration: heartbeat, + + pruningAlertIDs: make(map[types.FileContractID]types.Hash256), } scanner, err := newScanner( ap, @@ -203,7 +206,15 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // evaluate the config - jc.Encode(contractor.EvaluateConfig(reqCfg, cs, fee, rs, gs, hosts)) + res, err := contractor.EvaluateConfig(reqCfg, cs, fee, rs, gs, hosts) + if errors.Is(err, contractor.ErrMissingRequiredFields) { + jc.Error(err, http.StatusBadRequest) + return + } else if err != nil { + jc.Error(err, http.StatusInternalServerError) + return + } + jc.Encode(res) } func (ap *Autopilot) Run() error { @@ -876,7 +887,7 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceSta return nil, err } ap.logger.Infof("initialised current period to %d", autopilot.CurrentPeriod) - } else if nextPeriod := autopilot.CurrentPeriod + autopilot.Config.Contracts.Period; cs.BlockHeight >= nextPeriod { + } else if nextPeriod := computeNextPeriod(cs.BlockHeight, autopilot.CurrentPeriod, autopilot.Config.Contracts.Period); nextPeriod != autopilot.CurrentPeriod { prevPeriod := autopilot.CurrentPeriod autopilot.CurrentPeriod = nextPeriod err := ap.bus.UpdateAutopilot(ctx, autopilot) @@ -950,3 +961,12 @@ func compatV105UsabilityFilterModeCheck(usabilityMode string) error { } return nil } + +func computeNextPeriod(bh, currentPeriod, period uint64) uint64 { + prevPeriod := currentPeriod + nextPeriod := prevPeriod + for bh >= nextPeriod+period { + nextPeriod 
+= period + } + return nextPeriod +} diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go new file mode 100644 index 000000000..60143f550 --- /dev/null +++ b/autopilot/autopilot_test.go @@ -0,0 +1,35 @@ +package autopilot + +import "testing" + +func TestComputeNextPeriod(t *testing.T) { + currentPeriod := uint64(100) + period := uint64(100) + tests := []struct { + blockHeight uint64 + nextPeriod uint64 + }{ + { + blockHeight: 100, + nextPeriod: 100, + }, + { + blockHeight: 150, + nextPeriod: 100, + }, + { + blockHeight: 200, + nextPeriod: 200, + }, + { + blockHeight: 400, + nextPeriod: 400, + }, + } + for _, test := range tests { + nextPeriod := computeNextPeriod(test.blockHeight, currentPeriod, period) + if nextPeriod != test.nextPeriod { + t.Fatalf("expected next period to be %d, got %d", test.nextPeriod, nextPeriod) + } + } +} diff --git a/autopilot/contract_pruning.go b/autopilot/contract_pruning.go index 78d9adf89..2f491249b 100644 --- a/autopilot/contract_pruning.go +++ b/autopilot/contract_pruning.go @@ -11,83 +11,42 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/build" + "go.uber.org/zap" ) var ( + errInvalidHandshake = errors.New("couldn't read host's handshake") errInvalidHandshakeSignature = errors.New("host's handshake signature was invalid") errInvalidMerkleProof = errors.New("host supplied invalid Merkle proof") + errInvalidSectorRootsRange = errors.New("number of roots does not match range") ) const ( - // timeoutPruneContract is the amount of time we wait for a contract to get - // pruned. 
+ // timeoutPruneContract defines the maximum amount of time we lock a + // contract for pruning timeoutPruneContract = 10 * time.Minute ) -type ( - pruneResult struct { - ts time.Time - - fcid types.FileContractID - hk types.PublicKey - version string - - pruned uint64 - remaining uint64 - duration time.Duration - - err error - } - - pruneMetrics []api.ContractPruneMetric -) +func (ap *Autopilot) dismissPruneAlerts(prunable []api.ContractPrunableData) { + ap.mu.Lock() + defer ap.mu.Unlock() -func (pr pruneResult) String() string { - msg := fmt.Sprintf("contract %v", pr.fcid) - if pr.hk != (types.PublicKey{}) { - msg += fmt.Sprintf(", host %v version %s", pr.hk, pr.version) - } - if pr.pruned > 0 { - msg += fmt.Sprintf(", pruned %d bytes, remaining %d bytes, elapsed %v", pr.pruned, pr.remaining, pr.duration) - } - if pr.err != nil { - msg += fmt.Sprintf(", err: %v", pr.err) - } - return msg -} + // use a sane timeout + ctx, cancel := context.WithTimeout(ap.shutdownCtx, 5*time.Minute) + defer cancel() -func (pm pruneMetrics) String() string { - var total uint64 - for _, m := range pm { - total += m.Pruned + // fetch contract ids that are prunable + prunableIDs := make(map[types.FileContractID]struct{}) + for _, contract := range prunable { + prunableIDs[contract.ID] = struct{}{} } - return fmt.Sprintf("pruned %d (%s) from %v contracts", total, humanReadableSize(int(total)), len(pm)) -} -func (pr pruneResult) toAlert() (id types.Hash256, alert *alerts.Alert) { - id = alerts.IDForContract(alertPruningID, pr.fcid) - - if shouldTrigger := pr.err != nil && !((utils.IsErr(pr.err, errInvalidMerkleProof) && build.VersionCmp(pr.version, "1.6.0") < 0) || - utils.IsErr(pr.err, api.ErrContractNotFound) || // contract got archived - utils.IsErr(pr.err, utils.ErrConnectionRefused) || - utils.IsErr(pr.err, utils.ErrConnectionTimedOut) || - utils.IsErr(pr.err, utils.ErrConnectionResetByPeer) || - utils.IsErr(pr.err, errInvalidHandshakeSignature) || - utils.IsErr(pr.err, 
utils.ErrNoRouteToHost) || - utils.IsErr(pr.err, utils.ErrNoSuchHost)); shouldTrigger { - alert = newContractPruningFailedAlert(pr.hk, pr.version, pr.fcid, pr.err) - } - return -} - -func (pr pruneResult) toMetric() api.ContractPruneMetric { - return api.ContractPruneMetric{ - Timestamp: api.TimeRFC3339(pr.ts), - ContractID: pr.fcid, - HostKey: pr.hk, - Pruned: pr.pruned, - Remaining: pr.remaining, - Duration: pr.duration, + // dismiss alerts for contracts that are no longer prunable + for fcid, alertID := range ap.pruningAlertIDs { + if _, ok := prunableIDs[fcid]; !ok { + ap.DismissAlert(ctx, alertID) + delete(ap.pruningAlertIDs, fcid) + } } } @@ -131,7 +90,11 @@ func (ap *Autopilot) fetchPrunableContracts() (prunable []api.ContractPrunableDa return } -func (ap *Autopilot) hostForContract(ctx context.Context, fcid types.FileContractID) (host api.Host, metadata api.ContractMetadata, err error) { +func (ap *Autopilot) fetchHostContract(fcid types.FileContractID) (host api.Host, metadata api.ContractMetadata, err error) { + // use a sane timeout + ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) + defer cancel() + // fetch the contract metadata, err = ap.bus.Contract(ctx, fcid) if err != nil { @@ -151,102 +114,93 @@ func (ap *Autopilot) performContractPruning(wp *workerPool) { if err != nil { ap.logger.Error(err) return - } else if len(prunable) == 0 { - ap.logger.Info("no contracts to prune") - return } - // prune every contract individually, one at a time and for a maximum - // duration of 'timeoutPruneContract' to limit the amount of time we lock - // the contract as contracts on old hosts can take a long time to prune - var metrics pruneMetrics - wp.withWorker(func(w Worker) { - for _, contract := range prunable { - // return if we're stopped - if ap.isStopped() { - return - } - - // prune contract - result := ap.pruneContract(w, contract.ID) - if result.err != nil { - ap.logger.Error(result) - } else { - ap.logger.Info(result) - } - - // handle 
alert - ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) - if id, alert := result.toAlert(); alert != nil { - ap.RegisterAlert(ctx, *alert) - } else { - ap.DismissAlert(ctx, id) - } - cancel() - - // handle metrics - metrics = append(metrics, result.toMetric()) + // dismiss alerts for contracts that are no longer prunable + ap.dismissPruneAlerts(prunable) + + // loop prunable contracts + var total uint64 + for _, contract := range prunable { + // check if we're stopped + if ap.isStopped() { + break } - }) - // record metrics - ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) - if err := ap.bus.RecordContractPruneMetric(ctx, metrics...); err != nil { - ap.logger.Error(err) + // fetch host + h, _, err := ap.fetchHostContract(contract.ID) + if utils.IsErr(err, api.ErrContractNotFound) { + continue // contract got archived + } else if err != nil { + ap.logger.Errorf("failed to fetch host for contract '%v', err %v", contract.ID, err) + continue + } + + // prune contract using a random worker + wp.withWorker(func(w Worker) { + total += ap.pruneContract(w, contract.ID, h.PublicKey, h.Settings.Version, h.Settings.Release) + }) } - cancel() - // log metrics - ap.logger.Info(metrics) + // log total pruned + ap.logger.Info(fmt.Sprintf("pruned %d (%s) from %v contracts", total, humanReadableSize(int(total)), len(prunable))) } -func (ap *Autopilot) pruneContract(w Worker, fcid types.FileContractID) pruneResult { - // create a sane timeout - ctx, cancel := context.WithTimeout(ap.shutdownCtx, 2*timeoutPruneContract) +func (ap *Autopilot) pruneContract(w Worker, fcid types.FileContractID, hk types.PublicKey, hostVersion, hostRelease string) uint64 { + // use a sane timeout + ctx, cancel := context.WithTimeout(ap.shutdownCtx, timeoutPruneContract+5*time.Minute) defer cancel() - // fetch the host - host, _, err := ap.hostForContract(ctx, fcid) - if err != nil { - return pruneResult{fcid: fcid, err: err} - } - // prune the contract start := time.Now() 
pruned, remaining, err := w.RHPPruneContract(ctx, fcid, timeoutPruneContract) - if err != nil && pruned == 0 { - return pruneResult{fcid: fcid, hk: host.PublicKey, version: host.Settings.Version, err: err} - } else if err != nil && utils.IsErr(err, context.DeadlineExceeded) { + duration := time.Since(start) + + // ignore slow pruning until host network is 1.6.0+ + if utils.IsErr(err, context.DeadlineExceeded) && pruned > 0 { err = nil } - return pruneResult{ - ts: start, - - fcid: fcid, - hk: host.PublicKey, - version: host.Settings.Version, + // handle metrics + if err == nil || pruned > 0 { + if err := ap.bus.RecordContractPruneMetric(ctx, api.ContractPruneMetric{ + Timestamp: api.TimeRFC3339(start), - pruned: pruned, - remaining: remaining, - duration: time.Since(start), + ContractID: fcid, + HostKey: hk, + HostVersion: hostVersion, - err: err, + Pruned: pruned, + Remaining: remaining, + Duration: duration, + }); err != nil { + ap.logger.Error(err) + } } -} -func humanReadableSize(b int) string { - const unit = 1024 - if b < unit { - return fmt.Sprintf("%d B", b) + // handle logs + log := ap.logger.With("contract", fcid, "host", hk, "version", hostVersion, "release", hostRelease, "pruned", pruned, "remaining", remaining, "elapsed", duration) + if err != nil && pruned > 0 { + log.With(zap.Error(err)).Error("unexpected error interrupted pruning") + } else if err != nil { + log.With(zap.Error(err)).Error("failed to prune contract") + } else { + log.Info("successfully pruned contract") } - div, exp := int64(unit), 0 - for n := b / unit; n >= unit; n /= unit { - div *= unit - exp++ + + // handle alerts + ap.mu.Lock() + defer ap.mu.Unlock() + alertID := alerts.IDForContract(alertPruningID, fcid) + if shouldSendPruneAlert(err, hostVersion, hostRelease) { + ap.RegisterAlert(ctx, newContractPruningFailedAlert(hk, hostVersion, hostRelease, fcid, err)) + ap.pruningAlertIDs[fcid] = alertID // store id to dismiss stale alerts + } else { + ap.DismissAlert(ctx, alertID) + 
delete(ap.pruningAlertIDs, fcid) } - return fmt.Sprintf("%.1f %ciB", - float64(b)/float64(div), "KMGTPE"[exp]) + + return pruned } func (ap *Autopilot) tryPerformPruning(wp *workerPool) { @@ -268,3 +222,31 @@ func (ap *Autopilot) tryPerformPruning(wp *workerPool) { ap.mu.Unlock() }() } + +func humanReadableSize(b int) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %ciB", + float64(b)/float64(div), "KMGTPE"[exp]) +} + +func shouldSendPruneAlert(err error, version, release string) bool { + oldHost := (build.VersionCmp(version, "1.6.0") < 0 || version == "1.6.0" && release == "") + sectorRootsIssue := utils.IsErr(err, errInvalidSectorRootsRange) && oldHost + merkleRootIssue := utils.IsErr(err, errInvalidMerkleProof) && oldHost + return err != nil && !(sectorRootsIssue || merkleRootIssue || + utils.IsErr(err, utils.ErrConnectionRefused) || + utils.IsErr(err, utils.ErrConnectionTimedOut) || + utils.IsErr(err, utils.ErrConnectionResetByPeer) || + utils.IsErr(err, errInvalidHandshakeSignature) || + utils.IsErr(err, errInvalidHandshake) || + utils.IsErr(err, utils.ErrNoRouteToHost) || + utils.IsErr(err, utils.ErrNoSuchHost)) +} diff --git a/autopilot/contractor/alerts.go b/autopilot/contractor/alerts.go index 661d5393a..3505667ad 100644 --- a/autopilot/contractor/alerts.go +++ b/autopilot/contractor/alerts.go @@ -41,7 +41,7 @@ func newContractRenewalFailedAlert(contract api.ContractMetadata, interrupted bo } } -func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { +func newLostSectorsAlert(hk types.PublicKey, version, release string, lostSectors uint64) alerts.Alert { return alerts.Alert{ ID: alerts.IDForHost(alertLostSectorsID, hk), Severity: alerts.SeverityWarning, @@ -49,6 +49,8 @@ func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { Data: 
map[string]interface{}{ "lostSectors": lostSectors, "hostKey": hk.String(), + "hostVersion": version, + "hostRelease": release, "hint": "The host has reported that it can't serve at least one sector. Consider blocking this host through the blocklist feature. If you think this was a mistake and you want to ignore this warning for now you can reset the lost sector count", }, Timestamp: time.Now(), diff --git a/autopilot/contractor/contract_spending.go b/autopilot/contractor/contract_spending.go index f72a045d3..82e08831c 100644 --- a/autopilot/contractor/contract_spending.go +++ b/autopilot/contractor/contract_spending.go @@ -1,25 +1,10 @@ package contractor import ( - "context" - "go.sia.tech/core/types" "go.sia.tech/renterd/api" ) -func (c *Contractor) contractSpending(ctx context.Context, contract api.Contract, currentPeriod uint64) (api.ContractSpending, error) { - ancestors, err := c.bus.AncestorContracts(ctx, contract.ID, currentPeriod) - if err != nil { - return api.ContractSpending{}, err - } - // compute total spending - total := contract.Spending - for _, ancestor := range ancestors { - total = total.Add(ancestor.Spending) - } - return total, nil -} - func (c *Contractor) currentPeriodSpending(contracts []api.Contract, currentPeriod uint64) types.Currency { totalCosts := make(map[types.FileContractID]types.Currency) for _, c := range contracts { @@ -28,13 +13,11 @@ func (c *Contractor) currentPeriodSpending(contracts []api.Contract, currentPeri // filter contracts in the current period var filtered []api.ContractMetadata - c.mu.Lock() for _, contract := range contracts { if contract.WindowStart <= currentPeriod { filtered = append(filtered, contract.ContractMetadata) } } - c.mu.Unlock() // calculate the money allocated var totalAllocated types.Currency diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 4e3a361d6..ca0c5c9df 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -7,7 
+7,6 @@ import ( "math" "sort" "strings" - "sync" "time" "github.com/montanaflynn/stats" @@ -104,17 +103,16 @@ type Worker interface { RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) RHPForm(ctx context.Context, endHeight uint64, hk types.PublicKey, hostIP string, renterAddress types.Address, renterFunds types.Currency, hostCollateral types.Currency) (rhpv2.ContractRevision, []types.Transaction, error) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) - RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral types.Currency, expectedStorage, windowSize uint64) (api.RHPRenewResponse, error) + RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedNewStorage, windowSize uint64) (api.RHPRenewResponse, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) } type ( Contractor struct { - alerter alerts.Alerter - bus Bus - churn *accumulatedChurn - resolver *ipResolver - logger *zap.SugaredLogger + alerter alerts.Alerter + bus Bus + churn *accumulatedChurn + logger *zap.SugaredLogger revisionBroadcastInterval time.Duration revisionLastBroadcast map[types.FileContractID]time.Time @@ -122,8 +120,6 @@ type ( firstRefreshFailure map[types.FileContractID]time.Time - mu sync.Mutex - shutdownCtx context.Context shutdownCtxCancel context.CancelFunc } @@ -134,9 +130,8 @@ type ( } contractInfo struct { + host api.Host contract api.Contract - settings rhpv2.HostSettings - priceTable rhpv3.HostPriceTable usable bool recoverable bool } @@ -184,8 +179,6 @@ func New(bus Bus, alerter alerts.Alerter, logger *zap.SugaredLogger, 
revisionSub firstRefreshFailure: make(map[types.FileContractID]time.Time), - resolver: newIPResolver(ctx, resolverLookupTimeout, logger.Named("resolver")), - shutdownCtx: ctx, shutdownCtxCancel: cancel, } @@ -246,13 +239,15 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro if err != nil && !strings.Contains(err.Error(), api.ErrContractSetNotFound.Error()) { return false, err } + hasContractInSet := make(map[types.PublicKey]types.FileContractID) isInCurrentSet := make(map[types.FileContractID]struct{}) for _, c := range currentSet { + hasContractInSet[c.HostKey] = c.ID isInCurrentSet[c.ID] = struct{}{} } c.logger.Infof("contract set '%s' holds %d contracts", ctx.ContractSet(), len(currentSet)) - // fetch all contracts from the worker. + // fetch all contracts from the worker start := time.Now() resp, err := w.Contracts(ctx, timeoutHostRevision) if err != nil { @@ -295,11 +290,24 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro return false, err } + // resolve host IPs on the fly for hosts that have a contract in the set but + // no subnet information, this was added to minimize churn immediately after + // adding 'subnets' to the host table + for _, h := range hosts { + if fcid, ok := hasContractInSet[h.PublicKey]; ok && len(h.Subnets) == 0 { + h.Subnets, _, err = utils.ResolveHostIP(ctx, h.NetAddress) + if err != nil { + c.logger.Warnw("failed to resolve host IP for a host with a contract in the set", "hk", h.PublicKey, "fcid", fcid, "err", err) + continue + } + } + } + // check if any used hosts have lost data to warn the user var toDismiss []types.Hash256 for _, h := range hosts { if registerLostSectorsAlert(h.Interactions.LostSectors*rhpv2.SectorSize, h.StoredData) { - c.alerter.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Interactions.LostSectors)) + c.alerter.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Settings.Version, h.Settings.Release, h.Interactions.LostSectors)) } 
else { toDismiss = append(toDismiss, alerts.IDForHost(alertLostSectorsID, h.PublicKey)) } @@ -416,6 +424,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro // check if we need to form contracts and add them to the contract set var formed []api.ContractMetadata if uint64(len(updatedSet)) < threshold && !ctx.state.SkipContractFormations { + // form contracts formed, err = c.runContractFormations(ctx, w, candidates, usedHosts, unusableHosts, ctx.WantedContracts()-uint64(len(updatedSet)), &remaining) if err != nil { c.logger.Errorf("failed to form contracts, err: %v", err) // continue @@ -460,7 +469,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro return c.computeContractSetChanged(mCtx, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func (c *Contractor) computeContractSetChanged(ctx *mCtx, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { +func (c *Contractor) computeContractSetChanged(ctx *mCtx, oldSet, newSet, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { name := ctx.ContractSet() // build set lookups @@ -696,10 +705,11 @@ LOOP: // if we were not able to the contract's revision, we can't properly // perform the checks that follow, however we do want to be lenient if // this contract is in the current set and we still have leeway left + _, inSet := inCurrentSet[fcid] if contract.Revision == nil { - if _, found := inCurrentSet[fcid]; !found || remainingKeepLeeway == 0 { + if !inSet || remainingKeepLeeway == 0 { toStopUsing[fcid] = errContractNoRevision.Error() - } else if !ctx.AllowRedundantIPs() && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { + } else if ctx.ShouldFilterRedundantIPs() 
&& ipFilter.HasRedundantIP(host) { toStopUsing[fcid] = fmt.Sprintf("%v; %v", api.ErrUsabilityHostRedundantIP, errContractNoRevision) hostChecks[contract.HostKey].Usability.RedundantIP = true } else { @@ -710,8 +720,8 @@ LOOP: } // decide whether the contract is still good - ci := contractInfo{contract: contract, priceTable: host.PriceTable.HostPriceTable, settings: host.Settings} - usable, recoverable, refresh, renew, reasons := c.isUsableContract(ctx.AutopilotConfig(), ctx.state.RS, ci, bh, ipFilter) + ci := contractInfo{contract: contract, host: host} + usable, recoverable, refresh, renew, reasons := c.isUsableContract(ctx.AutopilotConfig(), ctx.state.RS, ci, inSet, bh, ipFilter) ci.usable = usable ci.recoverable = recoverable if !usable { @@ -767,9 +777,6 @@ func (c *Contractor) runContractFormations(ctx *mCtx, w Worker, candidates score default: } - // convenience variables - shouldFilter := !ctx.AllowRedundantIPs() - c.logger.Infow( "run contract formations", "usedHosts", len(usedHosts), @@ -785,6 +792,14 @@ func (c *Contractor) runContractFormations(ctx *mCtx, w Worker, candidates score ) }() + // build a new host filter + filter := c.newIPFilter() + for _, h := range candidates { + if _, used := usedHosts[h.host.PublicKey]; used { + _ = filter.HasRedundantIP(h.host) + } + } + // select candidates wanted := int(addLeeway(missing, leewayPctCandidateHosts)) selected := candidates.randSelectByScore(wanted) @@ -792,8 +807,10 @@ func (c *Contractor) runContractFormations(ctx *mCtx, w Worker, candidates score // print warning if we couldn't find enough hosts were found c.logger.Infof("looking for %d candidate hosts", wanted) if len(selected) < wanted { - msg := "no candidate hosts found" - if len(selected) > 0 { + var msg string + if len(selected) == 0 { + msg = "no candidate hosts found" + } else { msg = fmt.Sprintf("only found %d candidate host(s) out of the %d we wanted", len(selected), wanted) } if len(candidates) >= wanted { @@ -813,16 +830,6 @@ func (c 
*Contractor) runContractFormations(ctx *mCtx, w Worker, candidates score // prepare a gouging checker gc := ctx.GougingChecker(cs) - // prepare an IP filter that contains all used hosts - ipFilter := c.newIPFilter() - if shouldFilter { - for _, h := range candidates { - if _, used := usedHosts[h.host.PublicKey]; used { - _ = ipFilter.IsRedundantIP(h.host.NetAddress, h.host.PublicKey) - } - } - } - // calculate min/max contract funds minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(ctx.AutopilotConfig()) @@ -862,12 +869,16 @@ LOOP: } // check if we already have a contract with a host on that subnet - if shouldFilter && ipFilter.IsRedundantIP(host.NetAddress, host.PublicKey) { + if ctx.ShouldFilterRedundantIPs() && filter.HasRedundantIP(host) { continue } + // form the contract formedContract, proceed, err := c.formContract(ctx, w, host, minInitialContractFunds, maxInitialContractFunds, budget) - if err == nil { + if err != nil { + // remove the host from the filter + filter.Remove(host) + } else { // add contract to contract set formed = append(formed, formedContract) missing-- @@ -1080,7 +1091,7 @@ func (c *Contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract // check for a sane minimum that is equal to the initial contract funding // but without an upper cap. 
minInitialContractFunds, _ := initialContractFundingMinMax(cfg) - minimum := c.initialContractFunding(ci.settings, txnFeeEstimate, minInitialContractFunds, types.ZeroCurrency) + minimum := c.initialContractFunding(ci.host.Settings, txnFeeEstimate, minInitialContractFunds, types.ZeroCurrency) refreshAmountCapped := refreshAmount if refreshAmountCapped.Cmp(minimum) < 0 { refreshAmountCapped = minimum @@ -1092,91 +1103,6 @@ func (c *Contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract return refreshAmountCapped } -func (c *Contractor) renewFundingEstimate(ctx *mCtx, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { - // estimate the cost of the current data stored - dataStored := ci.contract.FileSize() - storageCost := sectorStorageCost(ci.priceTable, ctx.state.Period()).Mul64(bytesToSectors(dataStored)) - - // fetch the spending of the contract we want to renew. - prevSpending, err := c.contractSpending(ctx, ci.contract, ctx.state.Period()) - if err != nil { - c.logger.Errorw( - fmt.Sprintf("could not retrieve contract spending, err: %v", err), - "hk", ci.contract.HostKey, - "fcid", ci.contract.ID, - ) - return types.ZeroCurrency, err - } - - // estimate the amount of data uploaded, sanity check with data stored - // - // TODO: estimate is not ideal because price can change, better would be to - // look at the amount of data stored in the contract from the previous cycle - prevUploadDataEstimate := types.NewCurrency64(dataStored) // default to assuming all data was uploaded - sectorUploadCost := sectorUploadCost(ci.priceTable, ctx.Period()) - if !sectorUploadCost.IsZero() { - prevUploadDataEstimate = prevSpending.Uploads.Div(sectorUploadCost).Mul64(rhpv2.SectorSize) - } - if prevUploadDataEstimate.Cmp(types.NewCurrency64(dataStored)) > 0 { - prevUploadDataEstimate = types.NewCurrency64(dataStored) - } - - // estimate the - // - upload cost: previous uploads + prev storage - // - download cost: assumed to be the same - // 
- fund acount cost: assumed to be the same - newUploadsCost := prevSpending.Uploads.Add(sectorUploadCost.Mul(prevUploadDataEstimate.Div64(rhpv2.SectorSize))) - newDownloadsCost := prevSpending.Downloads - newFundAccountCost := prevSpending.FundAccount - - // estimate the siafund fees - // - // NOTE: the transaction fees are not included in the siafunds estimate - // because users are not charged siafund fees on money that doesn't go into - // the file contract (and the transaction fee goes to the miners, not the - // file contract). - subTotal := storageCost.Add(newUploadsCost).Add(newDownloadsCost).Add(newFundAccountCost).Add(ci.settings.ContractPrice) - siaFundFeeEstimate, err := c.bus.FileContractTax(ctx, subTotal) - if err != nil { - return types.ZeroCurrency, err - } - - // estimate the txn fee - txnFeeEstimate := fee.Mul64(estimatedFileContractTransactionSetSize) - - // add them all up and then return the estimate plus 33% for error margin - // and just general volatility of usage pattern. - estimatedCost := subTotal.Add(siaFundFeeEstimate).Add(txnFeeEstimate) - estimatedCost = estimatedCost.Add(estimatedCost.Div64(3)) // TODO: arbitrary divisor - - // check for a sane minimum that is equal to the initial contract funding - // but without an upper cap. 
- minInitialContractFunds, _ := initialContractFundingMinMax(ctx.AutopilotConfig()) - minimum := c.initialContractFunding(ci.settings, txnFeeEstimate, minInitialContractFunds, types.ZeroCurrency) - cappedEstimatedCost := estimatedCost - if cappedEstimatedCost.Cmp(minimum) < 0 { - cappedEstimatedCost = minimum - } - - if renewing { - c.logger.Infow("renew estimate", - "fcid", ci.contract.ID, - "dataStored", dataStored, - "storageCost", storageCost.String(), - "newUploadsCost", newUploadsCost.String(), - "newDownloadsCost", newDownloadsCost.String(), - "newFundAccountCost", newFundAccountCost.String(), - "contractPrice", ci.settings.ContractPrice.String(), - "prevUploadDataEstimate", prevUploadDataEstimate.String(), - "estimatedCost", estimatedCost.String(), - "minInitialContractFunds", minInitialContractFunds.String(), - "minimum", minimum.String(), - "cappedEstimatedCost", cappedEstimatedCost.String(), - ) - } - return cappedEstimatedCost, nil -} - func (c *Contractor) calculateMinScore(candidates []scoredHost, numContracts uint64) float64 { // return early if there's no hosts if len(candidates) == 0 { @@ -1307,10 +1233,12 @@ func (c *Contractor) renewContract(ctx *mCtx, w Worker, ci contractInfo, budget if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't renew contract without a revision") } + log := c.logger.With("to_renew", ci.contract.ID, "hk", ci.contract.HostKey, "hostVersion", ci.host.Settings.Version, "hostRelease", ci.host.Settings.Release) // convenience variables contract := ci.contract - settings := ci.settings + settings := ci.host.Settings + pt := ci.host.PriceTable.HostPriceTable fcid := contract.ID rev := contract.Revision hk := contract.HostKey @@ -1321,37 +1249,33 @@ func (c *Contractor) renewContract(ctx *mCtx, w Worker, ci contractInfo, budget return api.ContractMetadata{}, false, err } - // calculate the renter funds - renterFunds, err := c.renewFundingEstimate(ctx, ci, ctx.state.Fee, true) - if err != nil { 
- c.logger.Errorw(fmt.Sprintf("could not get renew funding estimate, err: %v", err), "hk", hk, "fcid", fcid) - return api.ContractMetadata{}, true, err - } + // calculate the renter funds for the renewal a.k.a. the funds the renter will + // be able to spend + minRenterFunds, _ := initialContractFundingMinMax(ctx.AutopilotConfig()) + renterFunds := renewFundingEstimate(minRenterFunds, contract.TotalCost, contract.RenterFunds(), log) // check our budget if budget.Cmp(renterFunds) < 0 { - c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds) + log.Infow("insufficient budget", "budget", budget, "needed", renterFunds) return api.ContractMetadata{}, false, errors.New("insufficient budget") } // sanity check the endheight is not the same on renewals endHeight := ctx.EndHeight() if endHeight <= rev.EndHeight() { - c.logger.Infow("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", ctx.state.Period, "bh", cs.BlockHeight) + log.Infow("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", ctx.state.Period, "bh", cs.BlockHeight) return api.ContractMetadata{}, false, fmt.Errorf("renewal endheight should surpass the current contract endheight, %v <= %v", endHeight, rev.EndHeight()) } - // calculate the host collateral - expectedNewStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, ci.priceTable) + // calculate the expected new storage + expectedNewStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, pt) // renew the contract - resp, err := w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, types.ZeroCurrency, expectedNewStorage, settings.WindowSize) + resp, err := w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, types.ZeroCurrency, *budget, expectedNewStorage, settings.WindowSize) if err != nil { - 
c.logger.Errorw( + log.Errorw( "renewal failed", zap.Error(err), - "hk", hk, - "fcid", fcid, "endHeight", endHeight, "renterFunds", renterFunds, "expectedNewStorage", expectedNewStorage, @@ -1363,17 +1287,17 @@ func (c *Contractor) renewContract(ctx *mCtx, w Worker, ci contractInfo, budget } // update the budget - *budget = budget.Sub(renterFunds) + *budget = budget.Sub(resp.FundAmount) // persist the contract renewedContract, err := c.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, fcid, api.ContractStatePending) if err != nil { - c.logger.Errorw(fmt.Sprintf("renewal failed to persist, err: %v", err), "hk", hk, "fcid", fcid) + log.Errorw(fmt.Sprintf("renewal failed to persist, err: %v", err)) return api.ContractMetadata{}, false, err } newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice) - c.logger.Infow( + log.Infow( "renewal succeeded", "fcid", renewedContract.ID, "renewedFrom", fcid, @@ -1387,10 +1311,12 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budge if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't refresh contract without a revision") } + log := c.logger.With("to_renew", ci.contract.ID, "hk", ci.contract.HostKey, "hostVersion", ci.host.Settings.Version, "hostRelease", ci.host.Settings.Release) // convenience variables contract := ci.contract - settings := ci.settings + settings := ci.host.Settings + pt := ci.host.PriceTable.HostPriceTable fcid := contract.ID rev := contract.Revision hk := contract.HostKey @@ -1403,7 +1329,7 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budge // calculate the renter funds var renterFunds types.Currency - if isOutOfFunds(ctx.AutopilotConfig(), ci.priceTable, ci.contract) { + if isOutOfFunds(ctx.AutopilotConfig(), pt, ci.contract) { renterFunds = c.refreshFundingEstimate(ctx.AutopilotConfig(), ci, ctx.state.Fee) } else { renterFunds = 
rev.ValidRenterPayout() // don't increase funds @@ -1411,21 +1337,25 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budge // check our budget if budget.Cmp(renterFunds) < 0 { - c.logger.Warnw("insufficient budget for refresh", "hk", hk, "fcid", fcid, "budget", budget, "needed", renterFunds) + log.Warnw("insufficient budget for refresh", "hk", hk, "fcid", fcid, "budget", budget, "needed", renterFunds) return api.ContractMetadata{}, false, fmt.Errorf("insufficient budget: %s < %s", budget.String(), renterFunds.String()) } - expectedStorage := renterFundsToExpectedStorage(renterFunds, contract.EndHeight()-cs.BlockHeight, ci.priceTable) + expectedNewStorage := renterFundsToExpectedStorage(renterFunds, contract.EndHeight()-cs.BlockHeight, pt) unallocatedCollateral := contract.RemainingCollateral() // a refresh should always result in a contract that has enough collateral - minNewCollateral := minRemainingCollateral(ctx.AutopilotConfig(), ctx.state.RS, renterFunds, settings, ci.priceTable).Mul64(2) + minNewCollateral := minRemainingCollateral(ctx.AutopilotConfig(), ctx.state.RS, renterFunds, settings, pt).Mul64(2) + + // maxFundAmount is the remaining funds of the contract to refresh plus the + // budget since the previous contract was in the same period + maxFundAmount := budget.Add(rev.ValidRenterPayout()) // renew the contract - resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) + resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, minNewCollateral, maxFundAmount, expectedNewStorage, settings.WindowSize) if err != nil { if strings.Contains(err.Error(), "new collateral is too low") { - c.logger.Infow("refresh failed: contract wouldn't have enough collateral after refresh", + log.Infow("refresh failed: contract 
wouldn't have enough collateral after refresh", "hk", hk, "fcid", fcid, "unallocatedCollateral", unallocatedCollateral.String(), @@ -1433,7 +1363,7 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budge ) return api.ContractMetadata{}, true, err } - c.logger.Errorw("refresh failed", zap.Error(err), "hk", hk, "fcid", fcid) + log.Errorw("refresh failed", zap.Error(err), "hk", hk, "fcid", fcid) if utils.IsErr(err, wallet.ErrInsufficientBalance) && !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } @@ -1441,18 +1371,18 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budge } // update the budget - *budget = budget.Sub(renterFunds) + *budget = budget.Sub(resp.FundAmount) // persist the contract refreshedContract, err := c.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, contract.ID, api.ContractStatePending) if err != nil { - c.logger.Errorw("adding refreshed contract failed", zap.Error(err), "hk", hk, "fcid", fcid) + log.Errorw("adding refreshed contract failed", zap.Error(err), "hk", hk, "fcid", fcid) return api.ContractMetadata{}, false, err } // add to renewed set newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice) - c.logger.Infow("refresh succeeded", + log.Infow("refresh succeeded", "fcid", refreshedContract.ID, "renewedFrom", contract.ID, "renterFunds", renterFunds.String(), @@ -1463,13 +1393,15 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budge } func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { + log := c.logger.With("hk", host.PublicKey, "hostVersion", host.Settings.Version, "hostRelease", host.Settings.Release) + // convenience variables hk := host.PublicKey // fetch host settings scan, err := w.RHPScan(ctx, hk, 
host.NetAddress, 0) if err != nil { - c.logger.Infow(err.Error(), "hk", hk) + log.Infow(err.Error(), "hk", hk) return api.ContractMetadata{}, true, err } @@ -1483,7 +1415,7 @@ func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitial txnFee := ctx.state.Fee.Mul64(estimatedFileContractTransactionSetSize) renterFunds := initialContractFunding(scan.Settings, txnFee, minInitialContractFunds, maxInitialContractFunds) if budget.Cmp(renterFunds) < 0 { - c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds) + log.Infow("insufficient budget", "budget", budget, "needed", renterFunds) return api.ContractMetadata{}, false, errors.New("insufficient budget") } @@ -1496,7 +1428,7 @@ func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitial contract, _, err := w.RHPForm(ctx, endHeight, hk, host.NetAddress, ctx.state.Address, renterFunds, hostCollateral) if err != nil { // TODO: keep track of consecutive failures and break at some point - c.logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) + log.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { return api.ContractMetadata{}, false, err } @@ -1510,12 +1442,11 @@ func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitial contractPrice := contract.Revision.MissedHostPayout().Sub(hostCollateral) formedContract, err := c.bus.AddContract(ctx, contract, contractPrice, renterFunds, cs.BlockHeight, api.ContractStatePending) if err != nil { - c.logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) + log.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) return api.ContractMetadata{}, true, err } - c.logger.Infow("formation succeeded", - "hk", hk, + log.Infow("formation succeeded", "fcid", formedContract.ID, "renterFunds", renterFunds.String(), "collateral", 
hostCollateral.String(), @@ -1578,6 +1509,40 @@ func refreshPriceTable(ctx context.Context, w Worker, host *api.Host) error { return nil } +// renewFundingEstimate computes the funds the renter should use to renew a +// contract. 'minRenterFunds' is the minimum amount the renter should use to +// renew a contract, 'initRenterFunds' is the amount the renter used to form the +// contract we are about to renew, and 'remainingRenterFunds' is the amount the +// contract currently has left. +func renewFundingEstimate(minRenterFunds, initRenterFunds, remainingRenterFunds types.Currency, log *zap.SugaredLogger) types.Currency { + log = log.With("minRenterFunds", minRenterFunds, "initRenterFunds", initRenterFunds, "remainingRenterFunds", remainingRenterFunds) + + // compute the funds used + usedFunds := types.ZeroCurrency + if initRenterFunds.Cmp(remainingRenterFunds) >= 0 { + usedFunds = initRenterFunds.Sub(remainingRenterFunds) + } + log = log.With("usedFunds", usedFunds) + + var renterFunds types.Currency + if usedFunds.IsZero() { + // if no funds were used, we use a fraction of the previous funding + log.Info("no funds were used, using half the funding from before") + renterFunds = initRenterFunds.Div64(2) // half the funds from before + } else { + // otherwise we use the remaining funds from before because a renewal + // shouldn't add more funds, that's what a refresh is for + renterFunds = remainingRenterFunds + } + + // but the funds should not drop below the amount we'd fund a new contract with + if renterFunds.Cmp(minRenterFunds) < 0 { + log.Info("funds would drop below the minimum, using the minimum") + renterFunds = minRenterFunds + } + return renterFunds +} + // renterFundsToExpectedStorage returns how much storage a renter is expected to // be able to afford given the provided 'renterFunds'. 
func renterFundsToExpectedStorage(renterFunds types.Currency, duration uint64, pt rhpv3.HostPriceTable) uint64 { diff --git a/autopilot/contractor/contractor_test.go b/autopilot/contractor/contractor_test.go index 7b7f855e3..245e19f7b 100644 --- a/autopilot/contractor/contractor_test.go +++ b/autopilot/contractor/contractor_test.go @@ -40,6 +40,60 @@ func TestCalculateMinScore(t *testing.T) { } } +func TestRenewFundingEstimate(t *testing.T) { + tests := []struct { + name string + minRenterFunds uint64 + initRenterFunds uint64 + remainingRenterFunds uint64 + expected uint64 + }{ + { + name: "UnusedAboveMinAboveInit", + minRenterFunds: 40, + initRenterFunds: 100, + remainingRenterFunds: 100, + expected: 50, + }, + { + name: "UnusedAboveMinBelowInit", + minRenterFunds: 80, + initRenterFunds: 100, + remainingRenterFunds: 100, + expected: 80, + }, + { + name: "UnusedBelowMin", + minRenterFunds: 0, + initRenterFunds: 100, + remainingRenterFunds: 100, + expected: 50, + }, + { + name: "UsedUnderMin", + minRenterFunds: 50, + initRenterFunds: 10, + remainingRenterFunds: 0, + expected: 50, + }, + { + name: "UsedAboveMin", + minRenterFunds: 50, + initRenterFunds: 100, + remainingRenterFunds: 90, + expected: 90, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + renterFunds := renewFundingEstimate(types.NewCurrency64(test.minRenterFunds), types.NewCurrency64(test.initRenterFunds), types.NewCurrency64(test.remainingRenterFunds), zap.NewNop().Sugar()) + if !renterFunds.Equals(types.NewCurrency64(test.expected)) { + t.Errorf("expected %v but got %v", test.expected, renterFunds) + } + }) + } +} + func TestShouldForgiveFailedRenewal(t *testing.T) { var fcid types.FileContractID frand.Read(fcid[:]) diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go index b3a5cc0a5..b40cc3be6 100644 --- a/autopilot/contractor/evaluate.go +++ b/autopilot/contractor/evaluate.go @@ -1,11 +1,15 @@ package contractor import ( + "errors" + 
"go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/worker" ) +var ErrMissingRequiredFields = errors.New("missing required fields in configuration, both allowance and amount must be set") + func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, period uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, period, cfg.Contracts.RenewWindow) for _, host := range hosts { @@ -20,7 +24,12 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // EvaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. -func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (resp api.ConfigEvaluationResponse) { +func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (resp api.ConfigEvaluationResponse, _ error) { + // we need an allowance and a target amount of contracts to evaluate + if cfg.Contracts.Allowance.IsZero() || cfg.Contracts.Amount == 0 { + return api.ConfigEvaluationResponse{}, ErrMissingRequiredFields + } + period := cfg.Contracts.Period gc := worker.NewGougingChecker(gs, cs, fee, period, cfg.Contracts.RenewWindow) diff --git a/autopilot/contractor/hostfilter.go b/autopilot/contractor/hostfilter.go index eb1a559a4..9298ab009 100644 --- a/autopilot/contractor/hostfilter.go +++ b/autopilot/contractor/hostfilter.go @@ -102,8 +102,8 @@ func (u *unusableHostsBreakdown) keysAndValues() []interface{} { // - recoverable -> can be usable in the contract set if it is refreshed/renewed // - refresh -> should be refreshed // - renew -> should be renewed -func (c *Contractor) 
isUsableContract(cfg api.AutopilotConfig, rs api.RedundancySettings, ci contractInfo, bh uint64, f *ipFilter) (usable, recoverable, refresh, renew bool, reasons []string) { - contract, s, pt := ci.contract, ci.settings, ci.priceTable +func (c *Contractor) isUsableContract(cfg api.AutopilotConfig, rs api.RedundancySettings, ci contractInfo, inSet bool, bh uint64, f *ipFilter) (usable, recoverable, refresh, renew bool, reasons []string) { + contract, s, pt := ci.contract, ci.host.Settings, ci.host.PriceTable.HostPriceTable usable = true if bh > contract.EndHeight() { @@ -121,14 +121,14 @@ func (c *Contractor) isUsableContract(cfg api.AutopilotConfig, rs api.Redundancy } else { if isOutOfCollateral(cfg, rs, contract, s, pt) { reasons = append(reasons, errContractOutOfCollateral.Error()) - usable = false - recoverable = true + usable = usable && inSet && c.shouldForgiveFailedRefresh(contract.ID) + recoverable = !usable // only needs to be recoverable if !usable refresh = true renew = false } if isOutOfFunds(cfg, pt, contract) { reasons = append(reasons, errContractOutOfFunds.Error()) - usable = usable && c.shouldForgiveFailedRefresh(contract.ID) + usable = usable && inSet && c.shouldForgiveFailedRefresh(contract.ID) recoverable = !usable // only needs to be recoverable if !usable refresh = true renew = false @@ -144,7 +144,7 @@ func (c *Contractor) isUsableContract(cfg api.AutopilotConfig, rs api.Redundancy // IP check should be last since it modifies the filter shouldFilter := !cfg.Hosts.AllowRedundantIPs && (usable || recoverable) - if shouldFilter && f.IsRedundantIP(contract.HostIP, contract.HostKey) { + if shouldFilter && f.HasRedundantIP(ci.host) { reasons = append(reasons, api.ErrUsabilityHostRedundantIP.Error()) usable = false recoverable = false // do not use in the contract set, but keep it around for downloads diff --git a/autopilot/contractor/hosts_test.go b/autopilot/contractor/hosts_test.go index 028da682d..9ca1fa6bd 100644 --- 
a/autopilot/contractor/hosts_test.go +++ b/autopilot/contractor/hosts_test.go @@ -52,20 +52,27 @@ func TestScoredHostsRandSelectByScore(t *testing.T) { t.Fatal("unexpected") } - // assert select is random on equal inputs - counts := make([]int, 2) + // assert select is random on equal inputs, we calculate the chi-square + // statistic and assert it's less than critical value of 10.828 (1 degree of + // freedom, using alpha of 0.001) + var counts [2]int hosts = scoredHosts{ {score: .1, host: api.Host{PublicKey: types.PublicKey{1}}}, {score: .1, host: api.Host{PublicKey: types.PublicKey{2}}}, } - for i := 0; i < 100; i++ { + nRuns := 1e5 + for i := 0; i < int(nRuns); i++ { if hosts.randSelectByScore(1)[0].host.PublicKey == (types.PublicKey{1}) { counts[0]++ } else { counts[1]++ } } - if diff := absDiffInt(counts[0], counts[1]); diff > 40 { - t.Fatal("unexpected", counts[0], counts[1], diff) + var chi2 float64 + for i := 0; i < 2; i++ { + chi2 += math.Pow(float64(counts[i])-nRuns/2, 2) / (nRuns / 2) + } + if chi2 > 10.828 { + t.Fatal("unexpected", counts[0], counts[1], chi2) } } diff --git a/autopilot/contractor/hostscore.go b/autopilot/contractor/hostscore.go index 51d8275fc..68abf1b21 100644 --- a/autopilot/contractor/hostscore.go +++ b/autopilot/contractor/hostscore.go @@ -52,6 +52,8 @@ func hostScore(cfg api.AutopilotConfig, h api.Host, expectedRedundancy float64) // priceAdjustmentScore computes a score between 0 and 1 for a host giving its // price settings and the autopilot's configuration. +// - If the given config is missing required fields (e.g. allowance or amount), +// math.SmallestNonzeroFloat64 is returned. // - 0.5 is returned if the host's costs exactly match the settings. // - If the host is cheaper than expected, a linear bonus is applied. 
The best // score of 1 is reached when the ratio between host cost and expectations is @@ -60,6 +62,12 @@ func hostScore(cfg api.AutopilotConfig, h api.Host, expectedRedundancy float64) // A 2x ratio will already cause the score to drop to 0.16 and a 3x ratio causes // it to drop to 0.05. func priceAdjustmentScore(hostCostPerPeriod types.Currency, cfg api.ContractsConfig) float64 { + // return early if the allowance or amount of hosts is zero, avoiding a + // division by zero panic below. + if cfg.Allowance.IsZero() || cfg.Amount == 0 { + return math.SmallestNonzeroFloat64 + } + hostPeriodBudget := cfg.Allowance.Div64(cfg.Amount) ratio := new(big.Rat).SetFrac(hostCostPerPeriod.Big(), hostPeriodBudget.Big()) diff --git a/autopilot/contractor/hostscore_test.go b/autopilot/contractor/hostscore_test.go index ae1b7668e..41347ec72 100644 --- a/autopilot/contractor/hostscore_test.go +++ b/autopilot/contractor/hostscore_test.go @@ -103,6 +103,15 @@ func TestHostScore(t *testing.T) { if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } + + // assert zero allowance does not panic + cfg.Contracts.Allowance = types.ZeroCurrency + _ = hostScore(cfg, h1, redundancy) + + // assert missing amount does not panic + cfg.Contracts.Allowance = types.Siacoins(1000) // reset + cfg.Contracts.Amount = 0 + _ = hostScore(cfg, h1, redundancy) } func TestPriceAdjustmentScore(t *testing.T) { @@ -231,10 +240,3 @@ func TestCollateralScore(t *testing.T) { t.Errorf("expected %v but got %v", 0, s) } } - -func absDiffInt(x, y int) int { - if x < y { - return y - x - } - return x - y -} diff --git a/autopilot/contractor/ipfilter.go b/autopilot/contractor/ipfilter.go index 3b754fa0a..b29668372 100644 --- a/autopilot/contractor/ipfilter.go +++ b/autopilot/contractor/ipfilter.go @@ -1,190 +1,66 @@ package contractor import ( - "context" "errors" - "fmt" - "net" - "time" - "go.sia.tech/core/types" - "go.sia.tech/renterd/internal/utils" + 
"go.sia.tech/renterd/api" "go.uber.org/zap" ) -const ( - // number of unique bits the host IP must have to prevent it from being filtered - ipv4FilterRange = 24 - ipv6FilterRange = 32 - - // ipCacheEntryValidity defines the amount of time the IP filter uses a - // cached entry when it encounters an error while trying to resolve a host's - // IP address - ipCacheEntryValidity = 24 * time.Hour - - // resolverLookupTimeout is the timeout we apply when resolving a host's IP address - resolverLookupTimeout = 10 * time.Second -) - var ( - ErrIOTimeout = errors.New("i/o timeout") - errServerMisbehaving = errors.New("server misbehaving") - errTooManyAddresses = errors.New("host has more than two addresses, or two of the same type") - errUnparsableAddress = errors.New("host address could not be parsed to a subnet") + errHostTooManySubnets = errors.New("host has more than two subnets") ) type ( ipFilter struct { subnetToHostKey map[string]string - resolver *ipResolver - logger *zap.SugaredLogger + logger *zap.SugaredLogger } ) func (c *Contractor) newIPFilter() *ipFilter { - c.resolver.pruneCache() return &ipFilter{ + logger: c.logger, subnetToHostKey: make(map[string]string), - - resolver: c.resolver, - logger: c.logger, } } -func (f *ipFilter) IsRedundantIP(hostIP string, hostKey types.PublicKey) bool { - // perform lookup - subnets, err := f.resolver.lookup(hostIP) - if err != nil { - if !utils.IsErr(err, utils.ErrNoSuchHost) { - f.logger.Errorf("failed to check for redundant IP, treating host %v with IP %v as redundant, err: %v", hostKey, hostIP, err) - } +func (f *ipFilter) HasRedundantIP(host api.Host) bool { + // validate host subnets + if len(host.Subnets) == 0 { + f.logger.Errorf("host %v has no subnet, treating its IP %v as redundant", host.PublicKey, host.NetAddress) return true - } - - // return early if we couldn't resolve to a subnet - if len(subnets) == 0 { - f.logger.Errorf("failed to resolve IP to a subnet, treating host %v with IP %v as redundant, err: %v", 
hostKey, hostIP, errUnparsableAddress) + } else if len(host.Subnets) > 2 { + f.logger.Errorf("host %v has more than 2 subnets, treating its IP %v as redundant", host.PublicKey, errHostTooManySubnets) return true } - // check if we know about this subnet, if not register all the subnets - host, found := f.subnetToHostKey[subnets[0]] - if !found { - for _, subnet := range subnets { - f.subnetToHostKey[subnet] = hostKey.String() + // check if we know about this subnet + var knownHost string + for _, subnet := range host.Subnets { + if knownHost = f.subnetToHostKey[subnet]; knownHost != "" { + break } - return false - } - - // otherwise compare host keys - sameHost := host == hostKey.String() - return !sameHost -} - -type ( - resolver interface { - LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) } - ipResolver struct { - resolver resolver - cache map[string]ipCacheEntry - timeout time.Duration - shutdownCtx context.Context - logger *zap.SugaredLogger + // if we know about the subnet, the host is redundant if it's not the same + if knownHost != "" { + return host.PublicKey.String() != knownHost } - ipCacheEntry struct { - created time.Time - subnets []string + // otherwise register all the host'ssubnets + for _, subnet := range host.Subnets { + f.subnetToHostKey[subnet] = host.PublicKey.String() } -) -func newIPResolver(ctx context.Context, timeout time.Duration, logger *zap.SugaredLogger) *ipResolver { - if timeout == 0 { - panic("timeout must be greater than zero") // developer error - } - return &ipResolver{ - resolver: &net.Resolver{}, - cache: make(map[string]ipCacheEntry), - timeout: resolverLookupTimeout, - shutdownCtx: ctx, - logger: logger, - } + return false } -func (r *ipResolver) pruneCache() { - for hostIP, entry := range r.cache { - if time.Since(entry.created) > ipCacheEntryValidity { - delete(r.cache, hostIP) +func (f *ipFilter) Remove(h api.Host) { + for k, v := range f.subnetToHostKey { + if v == h.PublicKey.String() { + 
delete(f.subnetToHostKey, k) } } } - -func (r *ipResolver) lookup(hostIP string) ([]string, error) { - // split off host - host, _, err := net.SplitHostPort(hostIP) - if err != nil { - return nil, err - } - - // make sure we don't hang - ctx, cancel := context.WithTimeout(r.shutdownCtx, r.timeout) - defer cancel() - - // lookup IP addresses - addrs, err := r.resolver.LookupIPAddr(ctx, host) - if err != nil { - // check the cache if it's an i/o timeout or server misbehaving error - if utils.IsErr(err, ErrIOTimeout) || utils.IsErr(err, errServerMisbehaving) { - if entry, found := r.cache[hostIP]; found && time.Since(entry.created) < ipCacheEntryValidity { - r.logger.Infof("using cached IP addresses for %v, err: %v", hostIP, err) - return entry.subnets, nil - } - } - return nil, err - } - - // filter out hosts associated with more than two addresses or two of the same type - if len(addrs) > 2 || (len(addrs) == 2) && (len(addrs[0].IP) == len(addrs[1].IP)) { - return nil, errTooManyAddresses - } - - // parse out subnets - subnets := parseSubnets(addrs) - - // add to cache - if len(subnets) > 0 { - r.cache[hostIP] = ipCacheEntry{ - created: time.Now(), - subnets: subnets, - } - } - - return subnets, nil -} - -func parseSubnets(addresses []net.IPAddr) []string { - subnets := make([]string, 0, len(addresses)) - - for _, address := range addresses { - // figure out the IP range - ipRange := ipv6FilterRange - if address.IP.To4() != nil { - ipRange = ipv4FilterRange - } - - // parse the subnet - cidr := fmt.Sprintf("%s/%d", address.String(), ipRange) - _, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - continue - } - - // add it - subnets = append(subnets, ipnet.String()) - } - - return subnets -} diff --git a/autopilot/contractor/ipfilter_test.go b/autopilot/contractor/ipfilter_test.go deleted file mode 100644 index 63be78753..000000000 --- a/autopilot/contractor/ipfilter_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package contractor - -import ( - "context" - "errors" - 
"net" - "testing" - "time" - - "go.sia.tech/core/types" - "go.sia.tech/renterd/internal/utils" - "go.uber.org/zap" -) - -var ( - ipv4Localhost = net.IP{127, 0, 0, 1} - ipv6Localhost = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} -) - -type testResolver struct { - addr map[string][]net.IPAddr - err error -} - -func (r *testResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) { - // return error if set - if err := r.err; err != nil { - r.err = nil - return nil, err - } - // return IP addr if set - if addrs, ok := r.addr[host]; ok { - return addrs, nil - } - return nil, nil -} - -func (r *testResolver) setNextErr(err error) { r.err = err } -func (r *testResolver) setAddr(host string, addrs []net.IPAddr) { r.addr[host] = addrs } - -func newTestResolver() *testResolver { - return &testResolver{addr: make(map[string][]net.IPAddr)} -} - -func newTestIPResolver(r resolver) *ipResolver { - ipr := newIPResolver(context.Background(), time.Minute, zap.NewNop().Sugar()) - ipr.resolver = r - return ipr -} - -func newTestIPFilter(r resolver) *ipFilter { - return &ipFilter{ - subnetToHostKey: make(map[string]string), - resolver: newTestIPResolver(r), - logger: zap.NewNop().Sugar(), - } -} - -func TestIPResolver(t *testing.T) { - r := newTestResolver() - ipr := newTestIPResolver(r) - - // test lookup error - r.setNextErr(errors.New("unknown error")) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errors.New("unknown error")) { - t.Fatal("unexpected error", err) - } - - // test IO timeout - no cache entry - r.setNextErr(ErrIOTimeout) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, ErrIOTimeout) { - t.Fatal("unexpected error", err) - } - - // test IO timeout - expired cache entry - ipr.cache["example.com:1234"] = ipCacheEntry{subnets: []string{"a"}} - r.setNextErr(ErrIOTimeout) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, ErrIOTimeout) { - t.Fatal("unexpected error", err) - } - - // test IO 
timeout - live cache entry - ipr.cache["example.com:1234"] = ipCacheEntry{created: time.Now(), subnets: []string{"a"}} - r.setNextErr(ErrIOTimeout) - if subnets, err := ipr.lookup("example.com:1234"); err != nil { - t.Fatal("unexpected error", err) - } else if len(subnets) != 1 || subnets[0] != "a" { - t.Fatal("unexpected subnets", subnets) - } - - // test too many addresses - more than two - r.setAddr("example.com", []net.IPAddr{{}, {}, {}}) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errTooManyAddresses) { - t.Fatal("unexpected error", err) - } - - // test too many addresses - two of the same type - r.setAddr("example.com", []net.IPAddr{{IP: net.IPv4(1, 2, 3, 4)}, {IP: net.IPv4(1, 2, 3, 4)}}) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errTooManyAddresses) { - t.Fatal("unexpected error", err) - } - - // test invalid addresses - r.setAddr("example.com", []net.IPAddr{{IP: ipv4Localhost}, {IP: net.IP{127, 0, 0, 2}}}) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errTooManyAddresses) { - t.Fatal("unexpected error", err) - } - - // test valid addresses - r.setAddr("example.com", []net.IPAddr{{IP: ipv4Localhost}, {IP: ipv6Localhost}}) - if subnets, err := ipr.lookup("example.com:1234"); err != nil { - t.Fatal("unexpected error", err) - } else if len(subnets) != 2 || subnets[0] != "127.0.0.0/24" || subnets[1] != "::/32" { - t.Fatal("unexpected subnets", subnets) - } -} - -func TestIPFilter(t *testing.T) { - r := newTestResolver() - r.setAddr("host1.com", []net.IPAddr{{IP: net.IP{192, 168, 0, 1}}}) - r.setAddr("host2.com", []net.IPAddr{{IP: net.IP{192, 168, 1, 1}}}) - r.setAddr("host3.com", []net.IPAddr{{IP: net.IP{192, 168, 2, 1}}}) - ipf := newTestIPFilter(r) - - // add 3 hosts - unique IPs - r1 := ipf.IsRedundantIP("host1.com:1234", types.PublicKey{1}) - r2 := ipf.IsRedundantIP("host2.com:1234", types.PublicKey{2}) - r3 := ipf.IsRedundantIP("host3.com:1234", types.PublicKey{3}) - if r1 || r2 || r3 { - 
t.Fatal("unexpected result", r1, r2, r3) - } - - // try add 4th host - redundant IP - r.setAddr("host4.com", []net.IPAddr{{IP: net.IP{192, 168, 0, 12}}}) - if redundant := ipf.IsRedundantIP("host4.com:1234", types.PublicKey{4}); !redundant { - t.Fatal("unexpected result", redundant) - } - - // add 4th host - unique IP - 2 subnets - r.setAddr("host4.com", []net.IPAddr{{IP: net.IP{192, 168, 3, 1}}, {IP: net.ParseIP("2001:0db8:85a3::8a2e:0370:7334")}}) - if redundant := ipf.IsRedundantIP("host4.com:1234", types.PublicKey{4}); redundant { - t.Fatal("unexpected result", redundant) - } - - // try add 5th host - redundant IP based on the IPv6 subnet from host4 - r.setAddr("host5.com", []net.IPAddr{{IP: net.ParseIP("2001:0db8:85b3::8a2e:0370:7335")}}) - if redundant := ipf.IsRedundantIP("host5.com:1234", types.PublicKey{5}); !redundant { - t.Fatal("unexpected result", redundant) - } - - // add 5th host - unique IP - r.setAddr("host5.com", []net.IPAddr{{IP: net.ParseIP("2001:0db9:85b3::8a2e:0370:7335")}}) - if redundant := ipf.IsRedundantIP("host5.com:1234", types.PublicKey{5}); redundant { - t.Fatal("unexpected result", redundant) - } -} diff --git a/autopilot/contractor/state.go b/autopilot/contractor/state.go index 2bf549da1..9f06fe168 100644 --- a/autopilot/contractor/state.go +++ b/autopilot/contractor/state.go @@ -40,26 +40,6 @@ func (ctx *mCtx) ApID() string { return ctx.state.AP.ID } -func (ctx *mCtx) Deadline() (deadline time.Time, ok bool) { - return ctx.ctx.Deadline() -} - -func (ctx *mCtx) Done() <-chan struct{} { - return ctx.ctx.Done() -} - -func (ctx *mCtx) Err() error { - return ctx.ctx.Err() -} - -func (ctx *mCtx) Value(key interface{}) interface{} { - return ctx.ctx.Value(key) -} - -func (ctx *mCtx) AllowRedundantIPs() bool { - return ctx.state.AP.Config.Hosts.AllowRedundantIPs -} - func (ctx *mCtx) Allowance() types.Currency { return ctx.state.Allowance() } @@ -76,16 +56,24 @@ func (ctx *mCtx) ContractSet() string { return 
ctx.state.AP.Config.Contracts.Set } +func (ctx *mCtx) Deadline() (deadline time.Time, ok bool) { + return ctx.ctx.Deadline() +} + +func (ctx *mCtx) Done() <-chan struct{} { + return ctx.ctx.Done() +} + func (ctx *mCtx) EndHeight() uint64 { return ctx.state.AP.EndHeight() } -func (ctx *mCtx) GougingChecker(cs api.ConsensusState) worker.GougingChecker { - return worker.NewGougingChecker(ctx.state.GS, cs, ctx.state.Fee, ctx.Period(), ctx.RenewWindow()) +func (ctx *mCtx) Err() error { + return ctx.ctx.Err() } -func (ctx *mCtx) WantedContracts() uint64 { - return ctx.state.AP.Config.Contracts.Amount +func (ctx *mCtx) GougingChecker(cs api.ConsensusState) worker.GougingChecker { + return worker.NewGougingChecker(ctx.state.GS, cs, ctx.state.Fee, ctx.Period(), ctx.RenewWindow()) } func (ctx *mCtx) Period() uint64 { @@ -96,6 +84,18 @@ func (ctx *mCtx) RenewWindow() uint64 { return ctx.state.AP.Config.Contracts.RenewWindow } +func (ctx *mCtx) ShouldFilterRedundantIPs() bool { + return !ctx.state.AP.Config.Hosts.AllowRedundantIPs +} + +func (ctx *mCtx) Value(key interface{}) interface{} { + return ctx.ctx.Value(key) +} + +func (ctx *mCtx) WantedContracts() uint64 { + return ctx.state.AP.Config.Contracts.Amount +} + func (state *MaintenanceState) Allowance() types.Currency { return state.AP.Config.Contracts.Allowance } diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 475511c7b..fa317fafa 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -11,7 +11,6 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/autopilot/contractor" "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -309,7 +308,7 @@ func (s *scanner) launchScanWorkers(ctx context.Context, w scanWorker, reqs chan scan, err := w.RHPScan(ctx, req.hostKey, req.hostIP, s.currentTimeout()) if err != nil { break // abort - } else if !utils.IsErr(errors.New(scan.ScanError), contractor.ErrIOTimeout) && scan.Ping > 0 { + } else if 
!utils.IsErr(errors.New(scan.ScanError), utils.ErrIOTimeout) && scan.Ping > 0 { s.tracker.addDataPoint(time.Duration(scan.Ping)) } diff --git a/autopilot/workerpool.go b/autopilot/workerpool.go index 16a6b4c99..990498e62 100644 --- a/autopilot/workerpool.go +++ b/autopilot/workerpool.go @@ -24,7 +24,7 @@ type Worker interface { RHPFund(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string, balance types.Currency) (err error) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) RHPPruneContract(ctx context.Context, fcid types.FileContractID, timeout time.Duration) (pruned, remaining uint64, err error) - RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral types.Currency, expectedStorage, windowSize uint64) (api.RHPRenewResponse, error) + RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedStorage, windowSize uint64) (api.RHPRenewResponse, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) RHPSync(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string) (err error) } diff --git a/build/env_default.go b/build/env_default.go index 83003de60..3730fd5b2 100644 --- a/build/env_default.go +++ b/build/env_default.go @@ -34,6 +34,13 @@ var ( MigrationSurchargeMultiplier: 10, // 10x } + // DefaultPricePinSettings define the default price pin settings the bus is + // configured with on startup. These values can be adjusted using the + // settings API. 
+ DefaultPricePinSettings = api.PricePinSettings{ + Enabled: false, + } + // DefaultUploadPackingSettings define the default upload packing settings // the bus is configured with on startup. DefaultUploadPackingSettings = api.UploadPackingSettings{ diff --git a/build/env_testnet.go b/build/env_testnet.go index 0bdef28f2..5ccf6f24f 100644 --- a/build/env_testnet.go +++ b/build/env_testnet.go @@ -36,6 +36,13 @@ var ( MigrationSurchargeMultiplier: 10, // 10x } + // DefaultPricePinSettings define the default price pin settings the bus is + // configured with on startup. These values can be adjusted using the + // settings API. + DefaultPricePinSettings = api.PricePinSettings{ + Enabled: false, + } + // DefaultUploadPackingSettings define the default upload packing settings // the bus is configured with on startup. DefaultUploadPackingSettings = api.UploadPackingSettings{ diff --git a/bus/accounts.go b/bus/accounts.go index 42dafefcf..d072de1c7 100644 --- a/bus/accounts.go +++ b/bus/accounts.go @@ -164,6 +164,7 @@ func (a *accounts) SetBalance(id rhpv3.Account, hk types.PublicKey, balance *big acc.Balance.Set(balance) acc.CleanShutdown = true acc.RequiresSync = false // resetting the balance resets the sync field + balanceAfter := acc.Balance.String() acc.mu.Unlock() // Log resets. 
@@ -171,7 +172,7 @@ func (a *accounts) SetBalance(id rhpv3.Account, hk types.PublicKey, balance *big "account", acc.ID, "host", acc.HostKey.String(), "balanceBefore", balanceBefore, - "balanceAfter", acc.Balance.String(), + "balanceAfter", balanceAfter, "driftBefore", driftBefore, "driftAfter", acc.Drift.String(), "delta", delta.String()) diff --git a/bus/bus.go b/bus/bus.go index 804184e43..312089c9e 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -23,6 +23,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/bus/client" + ibus "go.sia.tech/renterd/internal/bus" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" @@ -30,6 +31,11 @@ import ( "go.uber.org/zap" ) +const ( + defaultPinUpdateInterval = 5 * time.Minute + defaultPinRateWindow = 6 * time.Hour +) + // Client re-exports the client from the client package. type Client struct { *client.Client @@ -226,10 +232,11 @@ type bus struct { contractLocks *contractLocks uploadingSectors *uploadingSectorsCache - alerts alerts.Alerter - alertMgr *alerts.Manager - hooks *webhooks.Manager - logger *zap.SugaredLogger + alerts alerts.Alerter + alertMgr *alerts.Manager + pinMgr ibus.PinManager + webhooksMgr *webhooks.Manager + logger *zap.SugaredLogger } // Handler returns an HTTP handler that serves the bus API. @@ -374,9 +381,14 @@ func (b *bus) Handler() http.Handler { }) } +// Setup starts the pin manager. +func (b *bus) Setup(ctx context.Context) error { + return b.pinMgr.Run(ctx) +} + // Shutdown shuts down the bus. 
func (b *bus) Shutdown(ctx context.Context) error { - b.hooks.Close() + b.webhooksMgr.Close() accounts := b.accounts.ToPersist() err := b.eas.SaveAccounts(ctx, accounts) if err != nil { @@ -384,7 +396,11 @@ func (b *bus) Shutdown(ctx context.Context) error { } else { b.logger.Infof("successfully saved %v accounts", len(accounts)) } - return err + + return errors.Join( + err, + b.pinMgr.Close(ctx), + ) } func (b *bus) fetchSetting(ctx context.Context, key string, value interface{}) error { @@ -709,6 +725,13 @@ func (b *bus) walletPrepareRenewHandler(jc jape.Context) { // Compute how much renter funds to put into the new contract. cost := rhpv3.ContractRenewalCost(cs, wprr.PriceTable, fc, txn.MinerFees[0], basePrice) + // Make sure we don't exceed the max fund amount. + // TODO: remove the IsZero check for the v2 change + if /*!wprr.MaxFundAmount.IsZero() &&*/ wprr.MaxFundAmount.Cmp(cost) < 0 { + jc.Error(fmt.Errorf("%w: %v > %v", api.ErrMaxFundAmountExceeded, cost, wprr.MaxFundAmount), http.StatusBadRequest) + return + } + // Fund the txn. We are not signing it yet since it's not complete. The host // still needs to complete it and the revision + contract are signed with // the renter key by the worker. 
@@ -724,6 +747,7 @@ func (b *bus) walletPrepareRenewHandler(jc jape.Context) { return } jc.Encode(api.WalletPrepareRenewResponse{ + FundAmount: cost, ToSign: toSign, TransactionSet: append(parents, txn), }) @@ -944,7 +968,19 @@ func (b *bus) contractsArchiveHandlerPOST(jc jape.Context) { return } - jc.Check("failed to archive contracts", b.ms.ArchiveContracts(jc.Request.Context(), toArchive)) + if jc.Check("failed to archive contracts", b.ms.ArchiveContracts(jc.Request.Context(), toArchive)) == nil { + for fcid, reason := range toArchive { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleContract, + Event: api.EventArchive, + Payload: api.EventContractArchive{ + ContractID: fcid, + Reason: reason, + Timestamp: time.Now().UTC(), + }, + }) + } + } } func (b *bus) contractsSetsHandlerGET(jc jape.Context) { @@ -958,8 +994,21 @@ func (b *bus) contractsSetHandlerPUT(jc jape.Context) { var contractIds []types.FileContractID if set := jc.PathParam("set"); set == "" { jc.Error(errors.New("path parameter 'set' can not be empty"), http.StatusBadRequest) - } else if jc.Decode(&contractIds) == nil { - jc.Check("could not add contracts to set", b.ms.SetContractSet(jc.Request.Context(), set, contractIds)) + return + } else if jc.Decode(&contractIds) != nil { + return + } else if jc.Check("could not add contracts to set", b.ms.SetContractSet(jc.Request.Context(), set, contractIds)) != nil { + return + } else { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleContractSet, + Event: api.EventUpdate, + Payload: api.EventContractSetUpdate{ + Name: set, + ContractIDs: contractIds, + Timestamp: time.Now().UTC(), + }, + }) } } @@ -1140,10 +1189,21 @@ func (b *bus) contractIDRenewedHandlerPOST(jc jape.Context) { req.State = api.ContractStatePending } r, err := b.ms.AddRenewedContract(jc.Request.Context(), req.Contract, req.ContractPrice, req.TotalCost, req.StartHeight, req.RenewedFrom, req.State) - if jc.Check("couldn't store contract", err) == nil { - jc.Encode(r) + if 
jc.Check("couldn't store contract", err) != nil { + return } + b.uploadingSectors.HandleRenewal(req.Contract.ID(), req.RenewedFrom) + b.broadcastAction(webhooks.Event{ + Module: api.ModuleContract, + Event: api.EventRenew, + Payload: api.EventContractRenew{ + Renewal: r, + Timestamp: time.Now().UTC(), + }, + }) + + jc.Encode(r) } func (b *bus) contractIDRootsHandlerGET(jc jape.Context) { @@ -1304,7 +1364,10 @@ func (b *bus) objectsListHandlerPOST(jc jape.Context) { req.Bucket = api.DefaultBucketName } resp, err := b.ms.ListObjects(jc.Request.Context(), req.Bucket, req.Prefix, req.SortBy, req.SortDir, req.Marker, req.Limit) - if jc.Check("couldn't list objects", err) != nil { + if errors.Is(err, api.ErrMarkerNotFound) { + jc.Error(err, http.StatusBadRequest) + return + } else if jc.Check("couldn't list objects", err) != nil { return } jc.Encode(resp) @@ -1619,6 +1682,7 @@ func (b *bus) settingKeyHandlerPUT(jc jape.Context) { jc.Error(fmt.Errorf("couldn't update gouging settings, error: %v", err), http.StatusBadRequest) return } + b.pinMgr.TriggerUpdate() case api.SettingRedundancy: var rs api.RedundancySettings if err := json.Unmarshal(data, &rs); err != nil { @@ -1637,9 +1701,34 @@ func (b *bus) settingKeyHandlerPUT(jc jape.Context) { jc.Error(fmt.Errorf("couldn't update s3 authentication settings, error: %v", err), http.StatusBadRequest) return } + case api.SettingPricePinning: + var pps api.PricePinSettings + if err := json.Unmarshal(data, &pps); err != nil { + jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid request body"), http.StatusBadRequest) + return + } else if err := pps.Validate(); err != nil { + jc.Error(fmt.Errorf("couldn't update price pinning settings, invalid settings, error: %v", err), http.StatusBadRequest) + return + } else if pps.Enabled { + if _, err := ibus.NewForexClient(pps.ForexEndpointURL).SiacoinExchangeRate(jc.Request.Context(), pps.Currency); err != nil { + jc.Error(fmt.Errorf("couldn't update price pinning settings, 
forex API unreachable,error: %v", err), http.StatusBadRequest) + return + } + } + b.pinMgr.TriggerUpdate() + } + + if jc.Check("could not update setting", b.ss.UpdateSetting(jc.Request.Context(), key, string(data))) == nil { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: key, + Update: value, + Timestamp: time.Now().UTC(), + }, + }) } - - jc.Check("could not update setting", b.ss.UpdateSetting(jc.Request.Context(), key, string(data))) } func (b *bus) settingKeyHandlerDELETE(jc jape.Context) { @@ -1648,7 +1737,17 @@ func (b *bus) settingKeyHandlerDELETE(jc jape.Context) { jc.Error(errors.New("path parameter 'key' can not be empty"), http.StatusBadRequest) return } - jc.Check("could not delete setting", b.ss.DeleteSetting(jc.Request.Context(), key)) + + if jc.Check("could not delete setting", b.ss.DeleteSetting(jc.Request.Context(), key)) == nil { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventDelete, + Payload: api.EventSettingDelete{ + Key: key, + Timestamp: time.Now().UTC(), + }, + }) + } } func (b *bus) contractIDAncestorsHandler(jc jape.Context) { @@ -1966,7 +2065,9 @@ func (b *bus) autopilotsHandlerPUT(jc jape.Context) { return } - jc.Check("failed to update autopilot", b.as.UpdateAutopilot(jc.Request.Context(), ap)) + if jc.Check("failed to update autopilot", b.as.UpdateAutopilot(jc.Request.Context(), ap)) == nil { + b.pinMgr.TriggerUpdate() + } } func (b *bus) autopilotHostCheckHandlerPUT(jc jape.Context) { @@ -1992,6 +2093,16 @@ func (b *bus) autopilotHostCheckHandlerPUT(jc jape.Context) { } } +func (b *bus) broadcastAction(e webhooks.Event) { + log := b.logger.With("event", e.Event).With("module", e.Module) + err := b.webhooksMgr.BroadcastAction(context.Background(), e) + if err != nil { + log.With(zap.Error(err)).Error("failed to broadcast action") + } else { + log.Debug("successfully broadcast action") + } +} + func (b *bus) 
contractTaxHandlerGET(jc jape.Context) { var payout types.Currency if jc.DecodeParam("payout", (*api.ParamCurrency)(&payout)) != nil { @@ -2045,7 +2156,7 @@ func (b *bus) webhookActionHandlerPost(jc jape.Context) { if jc.Check("failed to decode action", jc.Decode(&action)) != nil { return } - b.hooks.BroadcastAction(jc.Request.Context(), action) + b.broadcastAction(action) } func (b *bus) webhookHandlerDelete(jc jape.Context) { @@ -2053,7 +2164,7 @@ func (b *bus) webhookHandlerDelete(jc jape.Context) { if jc.Decode(&wh) != nil { return } - err := b.hooks.Delete(jc.Request.Context(), wh) + err := b.webhooksMgr.Delete(jc.Request.Context(), wh) if errors.Is(err, webhooks.ErrWebhookNotFound) { jc.Error(fmt.Errorf("webhook for URL %v and event %v.%v not found", wh.URL, wh.Module, wh.Event), http.StatusNotFound) return @@ -2063,8 +2174,8 @@ func (b *bus) webhookHandlerDelete(jc jape.Context) { } func (b *bus) webhookHandlerGet(jc jape.Context) { - webhooks, queueInfos := b.hooks.Info() - jc.Encode(api.WebHookResponse{ + webhooks, queueInfos := b.webhooksMgr.Info() + jc.Encode(api.WebhookResponse{ Queues: queueInfos, Webhooks: webhooks, }) @@ -2075,10 +2186,12 @@ func (b *bus) webhookHandlerPost(jc jape.Context) { if jc.Decode(&req) != nil { return } - err := b.hooks.Register(jc.Request.Context(), webhooks.Webhook{ - Event: req.Event, - Module: req.Module, - URL: req.URL, + + err := b.webhooksMgr.Register(jc.Request.Context(), webhooks.Webhook{ + Event: req.Event, + Module: req.Module, + URL: req.URL, + Headers: req.Headers, }) if err != nil { jc.Error(fmt.Errorf("failed to add Webhook: %w", err), http.StatusInternalServerError) @@ -2342,12 +2455,23 @@ func (b *bus) multipartHandlerListPartsPOST(jc jape.Context) { jc.Encode(resp) } +func (b *bus) ProcessConsensusChange(cc modules.ConsensusChange) { + if cc.Synced { + b.broadcastAction(webhooks.Event{ + Module: api.ModuleConsensus, + Event: api.EventUpdate, + Payload: api.EventConsensusUpdate{ + ConsensusState: 
b.consensusState(), + TransactionFee: b.tp.RecommendedFee(), + Timestamp: time.Now().UTC(), + }, + }) + } +} + // New returns a new Bus. -func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp TransactionPool, w Wallet, hdb HostDB, as AutopilotStore, ms MetadataStore, ss SettingStore, eas EphemeralAccountStore, mtrcs MetricsStore, l *zap.Logger) (*bus, error) { +func New(s Syncer, am *alerts.Manager, whm *webhooks.Manager, cm ChainManager, tp TransactionPool, w Wallet, hdb HostDB, as AutopilotStore, ms MetadataStore, ss SettingStore, eas EphemeralAccountStore, mtrcs MetricsStore, l *zap.Logger) (*bus, error) { b := &bus{ - alerts: alerts.WithOrigin(am, "bus"), - alertMgr: am, - hooks: hm, s: s, cm: cm, tp: tp, @@ -2360,11 +2484,17 @@ func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp eas: eas, contractLocks: newContractLocks(), uploadingSectors: newUploadingSectorsCache(), - logger: l.Sugar().Named("bus"), + + alerts: alerts.WithOrigin(am, "bus"), + alertMgr: am, + webhooksMgr: whm, + logger: l.Sugar().Named("bus"), startTime: time.Now(), } + b.pinMgr = ibus.NewPinManager(whm, as, ss, defaultPinUpdateInterval, defaultPinRateWindow, b.logger.Desugar()) + // ensure we don't hang indefinitely ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() @@ -2372,6 +2502,7 @@ func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp // load default settings if the setting is not already set for key, value := range map[string]interface{}{ api.SettingGouging: build.DefaultGougingSettings, + api.SettingPricePinning: build.DefaultPricePinSettings, api.SettingRedundancy: build.DefaultRedundancySettings, api.SettingUploadPacking: build.DefaultUploadPackingSettings, } { @@ -2446,5 +2577,9 @@ func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp if err := eas.SetUncleanShutdown(ctx); err != nil { return nil, fmt.Errorf("failed to mark account 
shutdown as unclean: %w", err) } + + if err := cm.Subscribe(b, modules.ConsensusChangeRecent, nil); err != nil { + return nil, fmt.Errorf("failed to subscribe to consensus changes: %w", err) + } return b, nil } diff --git a/bus/client/client_test.go b/bus/client/client_test.go index 795439669..92a7fdcd2 100644 --- a/bus/client/client_test.go +++ b/bus/client/client_test.go @@ -70,7 +70,7 @@ func newTestClient(dir string) (*client.Client, func() error, func(context.Conte // create client client := client.New("http://"+l.Addr().String(), "test") - b, cleanup, err := node.NewBus(node.BusConfig{ + b, _, cleanup, err := node.NewBus(node.BusConfig{ Bus: config.Bus{ AnnouncementMaxAgeHours: 24 * 7 * 52, // 1 year Bootstrap: false, @@ -78,8 +78,11 @@ func newTestClient(dir string) (*client.Client, func() error, func(context.Conte UsedUTXOExpiry: time.Minute, SlabBufferCompletionThreshold: 0, }, - Miner: node.NewMiner(client), - SlabPruningInterval: time.Minute, + DatabaseLog: config.DatabaseLog{ + SlowThreshold: 100 * time.Millisecond, + }, + Miner: node.NewMiner(client), + Logger: zap.NewNop(), }, filepath.Join(dir, "bus"), types.GeneratePrivateKey(), zap.New(zapcore.NewNopCore())) if err != nil { return nil, nil, nil, err diff --git a/bus/client/objects.go b/bus/client/objects.go index 6a17691e2..fca893a49 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -52,10 +52,12 @@ func (c *Client) DeleteObject(ctx context.Context, bucket, path string, opts api // ListOBjects lists objects in the given bucket. 
func (c *Client) ListObjects(ctx context.Context, bucket string, opts api.ListObjectOptions) (resp api.ObjectsListResponse, err error) { err = c.c.WithContext(ctx).POST("/objects/list", api.ObjectsListRequest{ - Bucket: bucket, - Limit: opts.Limit, - Prefix: opts.Prefix, - Marker: opts.Marker, + Bucket: bucket, + Limit: opts.Limit, + Prefix: opts.Prefix, + Marker: opts.Marker, + SortBy: opts.SortBy, + SortDir: opts.SortDir, }, &resp) return } diff --git a/bus/client/wallet.go b/bus/client/wallet.go index db9ab4239..dd419c4ea 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -95,13 +95,14 @@ func (c *Client) WalletPrepareForm(ctx context.Context, renterAddress types.Addr } // WalletPrepareRenew funds and signs a contract renewal transaction. -func (c *Client) WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) { +func (c *Client) WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) { req := api.WalletPrepareRenewRequest{ Revision: revision, EndHeight: endHeight, ExpectedNewStorage: expectedStorage, HostAddress: hostAddress, PriceTable: pt, + MaxFundAmount: maxFundAmount, MinNewCollateral: minNewCollateral, RenterAddress: renterAddress, RenterFunds: renterFunds, diff --git a/bus/client/webhooks.go b/bus/client/webhooks.go index fe6f6d320..769d1cf57 100644 --- a/bus/client/webhooks.go +++ b/bus/client/webhooks.go @@ -23,17 +23,13 @@ func (c *Client) DeleteWebhook(ctx context.Context, url, module, event string) e } // RegisterWebhook registers a 
new webhook for the given URL. -func (c *Client) RegisterWebhook(ctx context.Context, url, module, event string) error { - err := c.c.WithContext(ctx).POST("/webhooks", webhooks.Webhook{ - Event: event, - Module: module, - URL: url, - }, nil) +func (c *Client) RegisterWebhook(ctx context.Context, webhook webhooks.Webhook) error { + err := c.c.WithContext(ctx).POST("/webhooks", webhook, nil) return err } // Webhooks returns all webhooks currently registered. -func (c *Client) Webhooks(ctx context.Context) (resp api.WebHookResponse, err error) { +func (c *Client) Webhooks(ctx context.Context) (resp api.WebhookResponse, err error) { err = c.c.WithContext(ctx).GET("/webhooks", &resp) return } diff --git a/cmd/renterd/commands.go b/cmd/renterd/commands.go new file mode 100644 index 000000000..96c798abf --- /dev/null +++ b/cmd/renterd/commands.go @@ -0,0 +1,86 @@ +package main + +import ( + "fmt" + "os" + + "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" + "go.sia.tech/renterd/build" + "gopkg.in/yaml.v3" +) + +func cmdBuildConfig() { + if _, err := os.Stat("renterd.yml"); err == nil { + if !promptYesNo("renterd.yml already exists. 
Would you like to overwrite it?") { + return + } + } + + fmt.Println("") + if cfg.Seed != "" { + fmt.Println(wrapANSI("\033[33m", "A wallet seed phrase is already set.", "\033[0m")) + fmt.Println("If you change your wallet seed phrase, your renter will not be able to access Siacoin associated with this wallet.") + fmt.Println("Ensure that you have backed up your wallet seed phrase before continuing.") + if promptYesNo("Would you like to change your wallet seed phrase?") { + setSeedPhrase() + } + } else { + setSeedPhrase() + } + + fmt.Println("") + if cfg.HTTP.Password != "" { + fmt.Println(wrapANSI("\033[33m", "An admin password is already set.", "\033[0m")) + fmt.Println("If you change your admin password, you will need to update any scripts or applications that use the admin API.") + if promptYesNo("Would you like to change your admin password?") { + setAPIPassword() + } + } else { + setAPIPassword() + } + + fmt.Println("") + setS3Config() + + fmt.Println("") + setAdvancedConfig() + + // write the config file + configPath := "renterd.yml" + if str := os.Getenv("RENTERD_CONFIG_FILE"); str != "" { + configPath = str + } + + f, err := os.Create(configPath) + if err != nil { + stdoutFatalError("Failed to create config file: " + err.Error()) + return + } + defer f.Close() + + enc := yaml.NewEncoder(f) + if err := enc.Encode(cfg); err != nil { + stdoutFatalError("Failed to encode config file: " + err.Error()) + return + } +} + +func cmdSeed() { + var seed [32]byte + phrase := wallet.NewSeedPhrase() + if err := wallet.SeedFromPhrase(&seed, phrase); err != nil { + println(err.Error()) + os.Exit(1) + } + key := wallet.KeyFromSeed(&seed, 0) + fmt.Println("Recovery Phrase:", phrase) + fmt.Println("Address", types.StandardUnlockHash(key.PublicKey())) +} + +func cmdVersion() { + fmt.Println("renterd", build.Version()) + fmt.Println("Network", build.NetworkName()) + fmt.Println("Commit:", build.Commit()) + fmt.Println("Build Date:", build.BuildTime()) +} diff --git 
a/cmd/renterd/config.go b/cmd/renterd/config.go index ec153f452..f4b728c7e 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -12,8 +12,8 @@ import ( "go.sia.tech/core/types" "go.sia.tech/coreutils/wallet" + "go.sia.tech/renterd/config" "golang.org/x/term" - "gopkg.in/yaml.v3" "lukechampine.com/frand" ) @@ -105,6 +105,15 @@ func stdoutError(msg string) { } } +func setInputValue(context string, value *string) { + if *value != "" { + context = fmt.Sprintf("%s (default: %q)", context, *value) + } + if input := readInput(context); input != "" { + *value = input + } +} + func setListenAddress(context string, value *string, allowEmpty bool) { // will continue to prompt until a valid value is entered for { @@ -195,20 +204,23 @@ func setSeedPhrase() { // setAPIPassword prompts the user to enter an API password if one is not // already set via environment variable or config file. func setAPIPassword() { + // return early if the password is already set + if len(cfg.HTTP.Password) >= 4 { + return + } + // retry until a valid API password is entered - for { + for len(cfg.HTTP.Password) < 4 { fmt.Println("Please choose a password for the renterd admin UI.") fmt.Println("This password will be required to access the admin UI in your web browser.") fmt.Println("(The password must be at least 4 characters.)") cfg.HTTP.Password = readPasswordInput("Enter password") - if len(cfg.HTTP.Password) >= 4 { - break + if len(cfg.HTTP.Password) < 4 { + // invalid password, retry + fmt.Println(wrapANSI("\033[31m", "Password must be at least 4 characters!", "\033[0m")) + fmt.Println("") } - - // invalid password, retry - fmt.Println(wrapANSI("\033[31m", "Password must be at least 4 characters!", "\033[0m")) - fmt.Println("") } } @@ -237,8 +249,8 @@ func setAdvancedConfig() { // database fmt.Println("") fmt.Println("The database is used to store the renter's metadata.") - fmt.Println("The embedded SQLite database is recommended for small (< 50TB), single-user setups. 
Choose this for the easiest setup.") - fmt.Println("MySQL database is recommended for larger (> 50TB) or multi-user setups. MySQL requires a separate MySQL server to connect to.") + fmt.Println("The embedded SQLite database requires no additional configuration and is ideal for testing or demo purposes.") + fmt.Println("For production usage, we recommend MySQL, which requires a separate MySQL server.") setStoreConfig() } @@ -258,15 +270,10 @@ func setStoreConfig() { cfg.Database.MySQL.User = readInput("MySQL username") cfg.Database.MySQL.Password = readPasswordInput("MySQL password") - objectDB := readInput("Object database name (default: renterd)") - if objectDB != "" { - cfg.Database.MySQL.Database = objectDB - } - metricsDB := readInput("Metrics database name (default: renterd_metrics)") - if metricsDB != "" { - cfg.Database.MySQL.MetricsDatabase = metricsDB - } + setInputValue("Object database name", &cfg.Database.MySQL.Database) + setInputValue("Metrics database name", &cfg.Database.MySQL.MetricsDatabase) default: + cfg.Database.MySQL = config.MySQL{} // omit defaults return } } @@ -337,59 +344,3 @@ func setS3Config() { cfg.S3.KeypairsV4[accessKey] = secretKey } - -func cmdBuildConfig() { - if _, err := os.Stat("renterd.yml"); err == nil { - if !promptYesNo("renterd.yml already exists. 
Would you like to overwrite it?") { - return - } - } - - fmt.Println("") - if cfg.Seed != "" { - fmt.Println(wrapANSI("\033[33m", "A wallet seed phrase is already set.", "\033[0m")) - fmt.Println("If you change your wallet seed phrase, your renter will not be able to access Siacoin associated with this wallet.") - fmt.Println("Ensure that you have backed up your wallet seed phrase before continuing.") - if promptYesNo("Would you like to change your wallet seed phrase?") { - setSeedPhrase() - } - } else { - setSeedPhrase() - } - - fmt.Println("") - if cfg.HTTP.Password != "" { - fmt.Println(wrapANSI("\033[33m", "An admin password is already set.", "\033[0m")) - fmt.Println("If you change your admin password, you will need to update any scripts or applications that use the admin API.") - if promptYesNo("Would you like to change your admin password?") { - setAPIPassword() - } - } else { - setAPIPassword() - } - - fmt.Println("") - setS3Config() - - fmt.Println("") - setAdvancedConfig() - - // write the config file - configPath := "renterd.yml" - if str := os.Getenv("RENTERD_CONFIG_FILE"); str != "" { - configPath = str - } - - f, err := os.Create(configPath) - if err != nil { - stdoutFatalError("Failed to create config file: " + err.Error()) - return - } - defer f.Close() - - enc := yaml.NewEncoder(f) - if err := enc.Encode(cfg); err != nil { - stdoutFatalError("Failed to encode config file: " + err.Error()) - return - } -} diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index c569b304b..d0e75d680 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -28,16 +28,13 @@ import ( "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" "go.sia.tech/renterd/internal/utils" - "go.sia.tech/renterd/stores" + iworker "go.sia.tech/renterd/internal/worker" "go.sia.tech/renterd/worker" "go.sia.tech/renterd/worker/s3" "go.sia.tech/web/renterd" "go.uber.org/zap" "golang.org/x/sys/cpu" - "golang.org/x/term" "gopkg.in/yaml.v3" - "gorm.io/gorm/logger" - 
"moul.io/zapgorm2" ) const ( @@ -61,10 +58,17 @@ There are 3 ways to configure renterd (sorted from lowest to highest precedence) - CLI flags - Environment variables +Usage: +` + // usageFooter is the footer for the CLI usage text. + usageFooter = ` +There are 3 commands: + - version: prints the network as well as build information + - config: builds a YAML config file through a series of prompts + - seed: generates a new seed and prints the recovery phrase + See the documentation (https://docs.sia.tech/) for more information and examples on how to configure and use renterd. - -Usage: ` ) @@ -80,8 +84,8 @@ var ( ShutdownTimeout: 5 * time.Minute, Database: config.Database{ MySQL: config.MySQL{ - Database: "renterd", User: "renterd", + Database: "renterd", MetricsDatabase: "renterd_metrics", }, }, @@ -149,20 +153,6 @@ var ( disableStdin bool ) -func mustLoadAPIPassword() { - if cfg.HTTP.Password != "" { - return - } - - fmt.Print("Enter API password: ") - pw, err := term.ReadPassword(int(os.Stdin.Fd())) - fmt.Println() - if err != nil { - log.Fatal(err) - } - cfg.HTTP.Password = string(pw) -} - func mustParseWorkers(workers, password string) { if workers == "" { return @@ -282,6 +272,7 @@ func main() { // worker flag.BoolVar(&cfg.Worker.AllowPrivateIPs, "worker.allowPrivateIPs", cfg.Worker.AllowPrivateIPs, "Allows hosts with private IPs") flag.DurationVar(&cfg.Worker.BusFlushInterval, "worker.busFlushInterval", cfg.Worker.BusFlushInterval, "Interval for flushing data to bus") + flag.Uint64Var(&cfg.Worker.DownloadMaxMemory, "worker.downloadMaxMemory", cfg.Worker.DownloadMaxMemory, "Max amount of RAM the worker allocates for slabs when downloading (overrides with RENTERD_WORKER_DOWNLOAD_MAX_MEMORY)") flag.Uint64Var(&cfg.Worker.DownloadMaxOverdrive, "worker.downloadMaxOverdrive", cfg.Worker.DownloadMaxOverdrive, "Max overdrive workers for downloads") flag.StringVar(&cfg.Worker.ID, "worker.id", cfg.Worker.ID, "Unique ID for worker (overrides with RENTERD_WORKER_ID)") 
flag.DurationVar(&cfg.Worker.DownloadOverdriveTimeout, "worker.downloadOverdriveTimeout", cfg.Worker.DownloadOverdriveTimeout, "Timeout for overdriving slab downloads") @@ -290,6 +281,7 @@ func main() { flag.DurationVar(&cfg.Worker.UploadOverdriveTimeout, "worker.uploadOverdriveTimeout", cfg.Worker.UploadOverdriveTimeout, "Timeout for overdriving slab uploads") flag.BoolVar(&cfg.Worker.Enabled, "worker.enabled", cfg.Worker.Enabled, "Enables/disables worker (overrides with RENTERD_WORKER_ENABLED)") flag.BoolVar(&cfg.Worker.AllowUnauthenticatedDownloads, "worker.unauthenticatedDownloads", cfg.Worker.AllowUnauthenticatedDownloads, "Allows unauthenticated downloads (overrides with RENTERD_WORKER_UNAUTHENTICATED_DOWNLOADS)") + flag.StringVar(&cfg.Worker.ExternalAddress, "worker.externalAddress", cfg.Worker.ExternalAddress, "Address of the worker on the network, only necessary when the bus is remote (overrides with RENTERD_WORKER_EXTERNAL_ADDR)") // autopilot flag.DurationVar(&cfg.Autopilot.AccountsRefillInterval, "autopilot.accountRefillInterval", cfg.Autopilot.AccountsRefillInterval, "Interval for refilling workers' account balances") @@ -304,35 +296,28 @@ func main() { flag.DurationVar(&cfg.ShutdownTimeout, "node.shutdownTimeout", cfg.ShutdownTimeout, "Timeout for node shutdown") // s3 + var hostBasesStr string flag.StringVar(&cfg.S3.Address, "s3.address", cfg.S3.Address, "Address for serving S3 API (overrides with RENTERD_S3_ADDRESS)") flag.BoolVar(&cfg.S3.DisableAuth, "s3.disableAuth", cfg.S3.DisableAuth, "Disables authentication for S3 API (overrides with RENTERD_S3_DISABLE_AUTH)") flag.BoolVar(&cfg.S3.Enabled, "s3.enabled", cfg.S3.Enabled, "Enables/disables S3 API (requires worker.enabled to be 'true', overrides with RENTERD_S3_ENABLED)") - flag.BoolVar(&cfg.S3.HostBucketEnabled, "s3.hostBucketEnabled", cfg.S3.HostBucketEnabled, "Enables bucket rewriting in the router (overrides with RENTERD_S3_HOST_BUCKET_ENABLED)") + flag.StringVar(&hostBasesStr, "s3.hostBases", 
"", "Enables bucket rewriting in the router for specific hosts provided via comma-separated list (overrides with RENTERD_S3_HOST_BUCKET_BASES)") + flag.BoolVar(&cfg.S3.HostBucketEnabled, "s3.hostBucketEnabled", cfg.S3.HostBucketEnabled, "Enables bucket rewriting in the router for all hosts (overrides with RENTERD_S3_HOST_BUCKET_ENABLED)") // custom usage flag.Usage = func() { log.Print(usageHeader) flag.PrintDefaults() + log.Print(usageFooter) } flag.Parse() + // NOTE: update the usage header when adding new commands if flag.Arg(0) == "version" { - fmt.Println("renterd", build.Version()) - fmt.Println("Network", build.NetworkName()) - log.Println("Commit:", build.Commit()) - log.Println("Build Date:", build.BuildTime()) + cmdVersion() return } else if flag.Arg(0) == "seed" { - var seed [32]byte - phrase := wallet.NewSeedPhrase() - if err := wallet.SeedFromPhrase(&seed, phrase); err != nil { - println(err.Error()) - os.Exit(1) - } - key := wallet.KeyFromSeed(&seed, 0) - fmt.Println("Recovery Phrase:", phrase) - fmt.Println("Address", types.StandardUnlockHash(key.PublicKey())) + cmdSeed() return } else if flag.Arg(0) == "config" { cmdBuildConfig() @@ -343,8 +328,6 @@ func main() { } // Overwrite flags from environment if set. 
- parseEnvVar("RENTERD_LOG_PATH", &cfg.Log.Path) - parseEnvVar("RENTERD_BUS_REMOTE_ADDR", &cfg.Bus.RemoteAddr) parseEnvVar("RENTERD_BUS_API_PASSWORD", &cfg.Bus.RemotePassword) parseEnvVar("RENTERD_BUS_GATEWAY_ADDR", &cfg.Bus.GatewayAddr) @@ -360,14 +343,12 @@ func main() { parseEnvVar("RENTERD_DB_LOGGER_LOG_LEVEL", &cfg.Log.Level) parseEnvVar("RENTERD_DB_LOGGER_SLOW_THRESHOLD", &cfg.Database.Log.SlowThreshold) - var depWorkerRemotePassStr string - var depWorkerRemoteAddrsStr string - parseEnvVar("RENTERD_WORKER_REMOTE_ADDRS", &depWorkerRemoteAddrsStr) - parseEnvVar("RENTERD_WORKER_API_PASSWORD", &depWorkerRemotePassStr) parseEnvVar("RENTERD_WORKER_ENABLED", &cfg.Worker.Enabled) parseEnvVar("RENTERD_WORKER_ID", &cfg.Worker.ID) parseEnvVar("RENTERD_WORKER_UNAUTHENTICATED_DOWNLOADS", &cfg.Worker.AllowUnauthenticatedDownloads) + parseEnvVar("RENTERD_WORKER_DOWNLOAD_MAX_MEMORY", &cfg.Worker.DownloadMaxMemory) parseEnvVar("RENTERD_WORKER_UPLOAD_MAX_MEMORY", &cfg.Worker.UploadMaxMemory) + parseEnvVar("RENTERD_WORKER_EXTERNAL_ADDR", &cfg.Worker.ExternalAddress) parseEnvVar("RENTERD_AUTOPILOT_ENABLED", &cfg.Autopilot.Enabled) parseEnvVar("RENTERD_AUTOPILOT_REVISION_BROADCAST_INTERVAL", &cfg.Autopilot.RevisionBroadcastInterval) @@ -377,7 +358,9 @@ func main() { parseEnvVar("RENTERD_S3_ENABLED", &cfg.S3.Enabled) parseEnvVar("RENTERD_S3_DISABLE_AUTH", &cfg.S3.DisableAuth) parseEnvVar("RENTERD_S3_HOST_BUCKET_ENABLED", &cfg.S3.HostBucketEnabled) + parseEnvVar("RENTERD_S3_HOST_BUCKET_BASES", &cfg.S3.HostBucketBases) + parseEnvVar("RENTERD_LOG_PATH", &cfg.Log.Path) parseEnvVar("RENTERD_LOG_LEVEL", &cfg.Log.Level) parseEnvVar("RENTERD_LOG_FILE_ENABLED", &cfg.Log.File.Enabled) parseEnvVar("RENTERD_LOG_FILE_FORMAT", &cfg.Log.File.Format) @@ -390,17 +373,38 @@ func main() { parseEnvVar("RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR", &cfg.Log.Database.IgnoreRecordNotFoundError) parseEnvVar("RENTERD_LOG_DATABASE_SLOW_THRESHOLD", &cfg.Log.Database.SlowThreshold) + // parse remotes 
+ var workerRemotePassStr string + var workerRemoteAddrsStr string + parseEnvVar("RENTERD_WORKER_REMOTE_ADDRS", &workerRemoteAddrsStr) + parseEnvVar("RENTERD_WORKER_API_PASSWORD", &workerRemotePassStr) + if workerRemoteAddrsStr != "" && workerRemotePassStr != "" { + mustParseWorkers(workerRemoteAddrsStr, workerRemotePassStr) + } + + // disable worker if remotes are set + if len(cfg.Worker.Remotes) > 0 { + cfg.Worker.Enabled = false + } + + // combine host bucket bases + for _, base := range strings.Split(hostBasesStr, ",") { + if trimmed := strings.TrimSpace(base); trimmed != "" { + cfg.S3.HostBucketBases = append(cfg.S3.HostBucketBases, base) + } + } + // check that the API password is set if cfg.HTTP.Password == "" { if disableStdin { stdoutFatalError("API password must be set via environment variable or config file when --env flag is set") return } - setAPIPassword() } + setAPIPassword() // check that the seed is set - if cfg.Seed == "" { + if cfg.Seed == "" && (cfg.Worker.Enabled || cfg.Bus.RemoteAddr == "") { // only worker & bus require a seed if disableStdin { stdoutFatalError("Seed must be set via environment variable or config file when --env flag is set") return @@ -408,12 +412,17 @@ func main() { setSeedPhrase() } - var rawSeed [32]byte - if err := wallet.SeedFromPhrase(&rawSeed, cfg.Seed); err != nil { - log.Fatal("failed to load wallet", zap.Error(err)) + // generate private key from seed + var pk types.PrivateKey + if cfg.Seed != "" { + var rawSeed [32]byte + if err := wallet.SeedFromPhrase(&rawSeed, cfg.Seed); err != nil { + log.Fatal("failed to load wallet", zap.Error(err)) + } + pk = wallet.KeyFromSeed(&rawSeed, 0) } - seed := wallet.KeyFromSeed(&rawSeed, 0) + // parse S3 auth keys if cfg.S3.Enabled { var keyPairsV4 string parseEnvVar("RENTERD_S3_KEYPAIRS_V4", &keyPairsV4) @@ -426,58 +435,7 @@ func main() { } } - mustLoadAPIPassword() - if depWorkerRemoteAddrsStr != "" && depWorkerRemotePassStr != "" { - mustParseWorkers(depWorkerRemoteAddrsStr, 
depWorkerRemotePassStr) - } - - network, _ := build.Network() - busCfg := node.BusConfig{ - Bus: cfg.Bus, - Network: network, - SlabPruningInterval: time.Hour, - } - // Init db dialector - if cfg.Database.MySQL.URI != "" { - busCfg.DBDialector = stores.NewMySQLConnection( - cfg.Database.MySQL.User, - cfg.Database.MySQL.Password, - cfg.Database.MySQL.URI, - cfg.Database.MySQL.Database, - ) - busCfg.DBMetricsDialector = stores.NewMySQLConnection( - cfg.Database.MySQL.User, - cfg.Database.MySQL.Password, - cfg.Database.MySQL.URI, - cfg.Database.MySQL.MetricsDatabase, - ) - } - - // Log level for db - lvlStr := cfg.Log.Level - if cfg.Log.Database.Level != "" { - lvlStr = cfg.Log.Database.Level - } - var level logger.LogLevel - switch strings.ToLower(lvlStr) { - case "": - level = logger.Warn // default to 'warn' if not set - case "error": - level = logger.Error - case "warn": - level = logger.Warn - case "info": - level = logger.Info - case "debug": - level = logger.Info - default: - log.Fatalf("invalid log level %q, options are: silent, error, warn, info", cfg.Log.Level) - } - if !cfg.Log.Database.Enabled { - level = logger.Silent - } - - // Create logger. 
+ // create logger if cfg.Log.Level == "" { cfg.Log.Level = "info" // default to 'info' if not set } @@ -492,30 +450,32 @@ func main() { logger.Warn("renterd is running on a system without AVX2 support, performance may be degraded") } - // configure database logger - dbLogCfg := cfg.Log.Database - if cfg.Database.Log != (config.DatabaseLog{}) { - dbLogCfg = cfg.Database.Log + if cfg.Log.Database.Level == "" { + cfg.Log.Database.Level = cfg.Log.Level } - busCfg.DBLogger = zapgorm2.Logger{ - ZapLogger: logger.Named("SQL"), - LogLevel: level, - SlowThreshold: dbLogCfg.SlowThreshold, - SkipCallerLookup: false, - IgnoreRecordNotFoundError: dbLogCfg.IgnoreRecordNotFoundError, - Context: nil, + + network, _ := build.Network() + busCfg := node.BusConfig{ + Bus: cfg.Bus, + Database: cfg.Database, + DatabaseLog: cfg.Log.Database, + Logger: logger, + Network: network, } - type shutdownFn struct { + type shutdownFnEntry struct { name string fn func(context.Context) error } - var shutdownFns []shutdownFn + var shutdownFns []shutdownFnEntry - if cfg.Bus.RemoteAddr != "" && len(cfg.Worker.Remotes) != 0 && !cfg.Autopilot.Enabled { + if cfg.Bus.RemoteAddr != "" && !cfg.Worker.Enabled && !cfg.Autopilot.Enabled { logger.Fatal("remote bus, remote worker, and no autopilot -- nothing to do!") } - if len(cfg.Worker.Remotes) == 0 && !cfg.Worker.Enabled && cfg.Autopilot.Enabled { + if cfg.Worker.Enabled && cfg.Bus.RemoteAddr != "" && cfg.Worker.ExternalAddress == "" { + logger.Fatal("can't enable the worker using a remote bus, without configuring the worker's external address") + } + if cfg.Autopilot.Enabled && !cfg.Worker.Enabled && len(cfg.Worker.Remotes) == 0 { logger.Fatal("can't enable autopilot without providing either workers to connect to or creating a worker") } @@ -530,13 +490,13 @@ func main() { cfg.HTTP.Address = "http://" + l.Addr().String() auth := jape.BasicAuth(cfg.HTTP.Password) - mux := &treeMux{ - sub: make(map[string]treeMux), + mux := &utils.TreeMux{ + Sub: 
make(map[string]utils.TreeMux), } // Create the webserver. srv := &http.Server{Handler: mux} - shutdownFns = append(shutdownFns, shutdownFn{ + shutdownFns = append(shutdownFns, shutdownFnEntry{ name: "HTTP Server", fn: srv.Shutdown, }) @@ -546,22 +506,24 @@ func main() { } busAddr, busPassword := cfg.Bus.RemoteAddr, cfg.Bus.RemotePassword + setupBusFn := node.NoopFn if cfg.Bus.RemoteAddr == "" { - b, fn, err := node.NewBus(busCfg, cfg.Directory, seed, logger) + b, setupFn, shutdownFn, err := node.NewBus(busCfg, cfg.Directory, pk, logger) if err != nil { logger.Fatal("failed to create bus, err: " + err.Error()) } - shutdownFns = append(shutdownFns, shutdownFn{ + setupBusFn = setupFn + shutdownFns = append(shutdownFns, shutdownFnEntry{ name: "Bus", - fn: fn, + fn: shutdownFn, }) - mux.sub["/api/bus"] = treeMux{h: auth(b)} + mux.Sub["/api/bus"] = utils.TreeMux{Handler: auth(b)} busAddr = cfg.HTTP.Address + "/api/bus" busPassword = cfg.HTTP.Password // only serve the UI if a bus is created - mux.h = renterd.Handler() + mux.Handler = renterd.Handler() } else { logger.Info("connecting to remote bus at " + busAddr) } @@ -570,22 +532,34 @@ func main() { var s3Srv *http.Server var s3Listener net.Listener var workers []autopilot.Worker + setupWorkerFn := node.NoopFn if len(cfg.Worker.Remotes) == 0 { if cfg.Worker.Enabled { - w, s3Handler, fn, err := node.NewWorker(cfg.Worker, s3.Opts{ + workerAddr := cfg.HTTP.Address + "/api/worker" + var shutdownFn node.ShutdownFn + w, s3Handler, setupFn, shutdownFn, err := node.NewWorker(cfg.Worker, s3.Opts{ AuthDisabled: cfg.S3.DisableAuth, + HostBucketBases: cfg.S3.HostBucketBases, HostBucketEnabled: cfg.S3.HostBucketEnabled, - }, bc, seed, logger) + }, bc, pk, logger) if err != nil { logger.Fatal("failed to create worker: " + err.Error()) } - shutdownFns = append(shutdownFns, shutdownFn{ + var workerExternAddr string + if cfg.Bus.RemoteAddr != "" { + workerExternAddr = cfg.Worker.ExternalAddress + } else { + workerExternAddr = 
workerAddr + } + setupWorkerFn = func(ctx context.Context) error { + return setupFn(ctx, workerExternAddr, cfg.HTTP.Password) + } + shutdownFns = append(shutdownFns, shutdownFnEntry{ name: "Worker", - fn: fn, + fn: shutdownFn, }) - mux.sub["/api/worker"] = treeMux{h: workerAuth(cfg.HTTP.Password, cfg.Worker.AllowUnauthenticatedDownloads)(w)} - workerAddr := cfg.HTTP.Address + "/api/worker" + mux.Sub["/api/worker"] = utils.TreeMux{Handler: iworker.Auth(cfg.HTTP.Password, cfg.Worker.AllowUnauthenticatedDownloads)(w)} wc := worker.NewClient(workerAddr, cfg.HTTP.Password) workers = append(workers, wc) @@ -598,7 +572,7 @@ func main() { if err != nil { logger.Fatal("failed to create listener: " + err.Error()) } - shutdownFns = append(shutdownFns, shutdownFn{ + shutdownFns = append(shutdownFns, shutdownFnEntry{ name: "S3", fn: s3Srv.Shutdown, }) @@ -624,18 +598,28 @@ func main() { } // NOTE: the autopilot shutdown function needs to be called first. - shutdownFns = append(shutdownFns, shutdownFn{ + shutdownFns = append(shutdownFns, shutdownFnEntry{ name: "Autopilot", fn: fn, }) go func() { autopilotErr <- runFn() }() - mux.sub["/api/autopilot"] = treeMux{h: auth(ap)} + mux.Sub["/api/autopilot"] = utils.TreeMux{Handler: auth(ap)} } // Start server. go srv.Serve(l) + // Finish bus setup. + if err := setupBusFn(context.Background()); err != nil { + logger.Fatal("failed to setup bus: " + err.Error()) + } + + // Finish worker setup. + if err := setupWorkerFn(context.Background()); err != nil { + logger.Fatal("failed to setup worker: " + err.Error()) + } + // Set initial S3 keys. 
if cfg.S3.Enabled && !cfg.S3.DisableAuth { as, err := bc.S3AuthenticationSettings(context.Background()) @@ -796,15 +780,3 @@ func runCompatMigrateAutopilotJSONToStore(bc *bus.Client, id, dir string) (err e return nil } - -func workerAuth(password string, unauthenticatedDownloads bool) func(http.Handler) http.Handler { - return func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if unauthenticatedDownloads && req.Method == http.MethodGet && strings.HasPrefix(req.URL.Path, "/objects/") { - h.ServeHTTP(w, req) - } else { - jape.BasicAuth(password)(h).ServeHTTP(w, req) - } - }) - } -} diff --git a/config/config.go b/config/config.go index 4a058a62d..5c0f9dc87 100644 --- a/config/config.go +++ b/config/config.go @@ -1,6 +1,7 @@ package config import ( + "os" "time" ) @@ -80,7 +81,13 @@ type ( Database DatabaseLog `yaml:"database,omitempty"` } - // MySQL contains the configuration for an optional MySQL database. + // SQLite contains the configuration for a SQLite database. + SQLite struct { + Database string `yaml:"database,omitempty"` + MetricsDatabase string `yaml:"metricsDatabase,omitempty"` + } + + // MySQL contains the configuration for a MySQL database. MySQL struct { URI string `yaml:"uri,omitempty"` User string `yaml:"user,omitempty"` @@ -98,8 +105,9 @@ type ( Address string `yaml:"address,omitempty"` DisableAuth bool `yaml:"disableAuth,omitempty"` Enabled bool `yaml:"enabled,omitempty"` - KeypairsV4 map[string]string `yaml:"keypairsV4,omitempty"` + KeypairsV4 map[string]string `yaml:"keypairsV4,omitempty"` // deprecated. included for compatibility. HostBucketEnabled bool `yaml:"hostBucketEnabled,omitempty"` + HostBucketBases []string `yaml:"hostBucketBases,omitempty"` } // Worker contains the configuration for a worker. 
@@ -117,6 +125,7 @@ type ( UploadMaxMemory uint64 `yaml:"uploadMaxMemory,omitempty"` UploadMaxOverdrive uint64 `yaml:"uploadMaxOverdrive,omitempty"` AllowUnauthenticatedDownloads bool `yaml:"allowUnauthenticatedDownloads,omitempty"` + ExternalAddress string `yaml:"externalAddress,omitempty"` } // Autopilot contains the configuration for an autopilot. @@ -133,3 +142,13 @@ type ( MigratorParallelSlabsPerWorker uint64 `yaml:"migratorParallelSlabsPerWorker,omitempty"` } ) + +func MySQLConfigFromEnv() MySQL { + return MySQL{ + URI: os.Getenv("RENTERD_DB_URI"), + User: os.Getenv("RENTERD_DB_USER"), + Password: os.Getenv("RENTERD_DB_PASSWORD"), + Database: os.Getenv("RENTERD_DB_NAME"), + MetricsDatabase: os.Getenv("RENTERD_DB_METRICS_NAME"), + } +} diff --git a/go.mod b/go.mod index 216b80b25..d5c2d8aed 100644 --- a/go.mod +++ b/go.mod @@ -1,69 +1,64 @@ module go.sia.tech/renterd -go 1.21.7 +go 1.21.8 -toolchain go1.21.8 +toolchain go1.22.3 require ( - github.com/gabriel-vasile/mimetype v1.4.3 - github.com/go-gormigrate/gormigrate/v2 v2.1.2 + github.com/gabriel-vasile/mimetype v1.4.4 github.com/google/go-cmp v0.6.0 - github.com/gotd/contrib v0.19.0 + github.com/gotd/contrib v0.20.0 github.com/klauspost/reedsolomon v1.12.1 - github.com/minio/minio-go/v7 v7.0.69 + github.com/minio/minio-go/v7 v7.0.72 github.com/montanaflynn/stats v0.7.1 + github.com/shopspring/decimal v1.4.0 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe - go.sia.tech/core v0.2.2 - go.sia.tech/coreutils v0.0.3 - go.sia.tech/gofakes3 v0.0.2 - go.sia.tech/hostd v1.0.4 - go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 + go.sia.tech/core v0.3.0 + go.sia.tech/coreutils v0.1.0 + go.sia.tech/gofakes3 v0.0.4 + go.sia.tech/hostd v1.1.1-beta.1.0.20240618072747-b3f430b4d272 + go.sia.tech/jape v0.11.2-0.20240306154058-9832414a5385 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.51.2 + go.sia.tech/web/renterd v0.55.0 go.uber.org/zap 
v1.27.0 - golang.org/x/crypto v0.22.0 - golang.org/x/sys v0.19.0 - golang.org/x/term v0.19.0 + golang.org/x/crypto v0.24.0 + golang.org/x/sys v0.21.0 + golang.org/x/term v0.21.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/mysql v1.5.6 - gorm.io/driver/sqlite v1.5.5 - gorm.io/gorm v1.25.9 + gorm.io/driver/mysql v1.5.7 + gorm.io/driver/sqlite v1.5.6 + gorm.io/gorm v1.25.10 lukechampine.com/frand v1.4.2 moul.io/zapgorm2 v1.3.0 ) require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.51.21 // indirect - github.com/cloudflare/cloudflare-go v0.91.0 // indirect + github.com/aws/aws-sdk-go v1.54.6 // indirect + github.com/cloudflare/cloudflare-go v0.97.0 // indirect github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-sql-driver/mysql v1.7.1 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.5 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/compress v1.17.6 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // 
indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/rs/xid v1.5.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect - github.com/shopspring/decimal v1.3.1 // indirect gitlab.com/NebulousLabs/bolt v1.4.4 // indirect gitlab.com/NebulousLabs/demotemutex v0.0.0-20151003192217-235395f71c40 // indirect gitlab.com/NebulousLabs/entropy-mnemonics v0.0.0-20181018051301-7532f67e3500 // indirect @@ -77,12 +72,12 @@ require ( gitlab.com/NebulousLabs/ratelimit v0.0.0-20200811080431-99b8f0768b2e // indirect gitlab.com/NebulousLabs/siamux v0.0.2-0.20220630142132-142a1443a259 // indirect gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect - go.sia.tech/web v0.0.0-20240422221546-c1709d16b6ef // indirect + go.sia.tech/web v0.0.0-20240610131903-5611d44a533e // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.20.0 // indirect + golang.org/x/tools v0.22.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - nhooyr.io/websocket v1.8.10 // indirect + nhooyr.io/websocket v1.8.11 // indirect ) diff --git a/go.sum b/go.sum index 3712ab233..6c9e2f318 100644 --- a/go.sum +++ b/go.sum @@ -1,1203 +1,28 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= 
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= -cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= -cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= -cloud.google.com/go/accessapproval v1.7.2/go.mod h1:/gShiq9/kK/h8T/eEn1BTzalDvk0mZxJlhfw0p+Xuc0= -cloud.google.com/go/accessapproval v1.7.3/go.mod 
h1:4l8+pwIxGTNqSf4T3ds8nLO94NQf0W/KnMNuQ9PbnP8= -cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= -cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= -cloud.google.com/go/accesscontextmanager v1.8.2/go.mod h1:E6/SCRM30elQJ2PKtFMs2YhfJpZSNcJyejhuzoId4Zk= -cloud.google.com/go/accesscontextmanager v1.8.3/go.mod h1:4i/JkF2JiFbhLnnpnfoTX5vRXfhf9ukhU1ANOTALTOQ= -cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= -cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= -cloud.google.com/go/aiplatform v1.50.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= -cloud.google.com/go/aiplatform v1.51.0/go.mod 
h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= -cloud.google.com/go/aiplatform v1.51.1/go.mod h1:kY3nIMAVQOK2XDqDPHaOuD9e+FdMA6OOpfBjsvaFSOo= -cloud.google.com/go/aiplatform v1.51.2/go.mod h1:hCqVYB3mY45w99TmetEoe8eCQEwZEp9WHxeZdcv9phw= -cloud.google.com/go/aiplatform v1.52.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= -cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= -cloud.google.com/go/analytics v0.21.4/go.mod h1:zZgNCxLCy8b2rKKVfC1YkC2vTrpfZmeRCySM3aUbskA= -cloud.google.com/go/analytics v0.21.5/go.mod h1:BQtOBHWTlJ96axpPPnw5CvGJ6i3Ve/qX2fTxR8qWyr8= -cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= -cloud.google.com/go/apigateway v1.6.2/go.mod h1:CwMC90nnZElorCW63P2pAYm25AtQrHfuOkbRSHj0bT8= -cloud.google.com/go/apigateway v1.6.3/go.mod h1:k68PXWpEs6BVDTtnLQAyG606Q3mz8pshItwPXjgv44Y= -cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod 
h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= -cloud.google.com/go/apigeeconnect v1.6.2/go.mod h1:s6O0CgXT9RgAxlq3DLXvG8riw8PYYbU/v25jqP3Dy18= -cloud.google.com/go/apigeeconnect v1.6.3/go.mod h1:peG0HFQ0si2bN15M6QSjEW/W7Gy3NYkWGz7pFz13cbo= -cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= -cloud.google.com/go/apigeeregistry v0.7.2/go.mod h1:9CA2B2+TGsPKtfi3F7/1ncCCsL62NXBRfM6iPoGSM+8= -cloud.google.com/go/apigeeregistry v0.8.1/go.mod h1:MW4ig1N4JZQsXmBSwH4rwpgDonocz7FPBSw6XPGHmYw= -cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= -cloud.google.com/go/appengine 
v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= -cloud.google.com/go/appengine v1.8.2/go.mod h1:WMeJV9oZ51pvclqFN2PqHoGnys7rK0rz6s3Mp6yMvDo= -cloud.google.com/go/appengine v1.8.3/go.mod h1:2oUPZ1LVZ5EXi+AF1ihNAF+S8JrzQ3till5m9VQkrsk= -cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= -cloud.google.com/go/area120 v0.8.2/go.mod h1:a5qfo+x77SRLXnCynFWPUZhnZGeSgvQ+Y0v1kSItkh4= -cloud.google.com/go/area120 v0.8.3/go.mod h1:5zj6pMzVTH+SVHljdSKC35sriR/CVvQZzG/Icdyriw0= -cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= 
-cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= -cloud.google.com/go/artifactregistry v1.14.2/go.mod h1:Xk+QbsKEb0ElmyeMfdHAey41B+qBq3q5R5f5xD4XT3U= -cloud.google.com/go/artifactregistry v1.14.3/go.mod h1:A2/E9GXnsyXl7GUvQ/2CjHA+mVRoWAXC0brg2os+kNI= -cloud.google.com/go/artifactregistry v1.14.4/go.mod h1:SJJcZTMv6ce0LDMUnihCN7WSrI+kBSFV0KIKo8S8aYU= -cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= -cloud.google.com/go/asset v1.15.0/go.mod h1:tpKafV6mEut3+vN9ScGvCHXHj7FALFVta+okxFECHcg= -cloud.google.com/go/asset v1.15.1/go.mod h1:yX/amTvFWRpp5rcFq6XbCxzKT8RJUam1UoboE179jU4= -cloud.google.com/go/asset v1.15.2/go.mod h1:B6H5tclkXvXz7PD22qCA2TDxSVQfasa3iDlM89O2NXs= -cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= 
-cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= -cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= -cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= -cloud.google.com/go/automl v1.13.2/go.mod h1:gNY/fUmDEN40sP8amAX3MaXkxcqPIn7F1UIIPZpy4Mg= -cloud.google.com/go/automl v1.13.3/go.mod h1:Y8KwvyAZFOsMAPqUCfNu1AyclbC6ivCUF/MTwORymyY= -cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= -cloud.google.com/go/baremetalsolution v1.2.0/go.mod 
h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw= -cloud.google.com/go/baremetalsolution v1.2.1/go.mod h1:3qKpKIw12RPXStwQXcbhfxVj1dqQGEvcmA+SX/mUR88= -cloud.google.com/go/baremetalsolution v1.2.2/go.mod h1:O5V6Uu1vzVelYahKfwEWRMaS3AbCkeYHy3145s1FkhM= -cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= -cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= -cloud.google.com/go/batch v1.5.0/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= -cloud.google.com/go/batch v1.5.1/go.mod h1:RpBuIYLkQu8+CWDk3dFD/t/jOCGuUpkpX+Y0n1Xccs8= -cloud.google.com/go/batch v1.6.1/go.mod h1:urdpD13zPe6YOK+6iZs/8/x2VBRofvblLpx0t57vM98= -cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= -cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= -cloud.google.com/go/beyondcorp v1.0.1/go.mod h1:zl/rWWAFVeV+kx+X2Javly7o1EIQThU4WlkynffL/lk= -cloud.google.com/go/beyondcorp v1.0.2/go.mod h1:m8cpG7caD+5su+1eZr+TSvF6r21NdLJk4f9u4SP2Ntc= -cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= 
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= -cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= -cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= -cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= -cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod 
h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= -cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= -cloud.google.com/go/billing v1.17.1/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= -cloud.google.com/go/billing v1.17.2/go.mod h1:u/AdV/3wr3xoRBk5xvUzYMS1IawOAPwQMuHgHMdljDg= -cloud.google.com/go/billing v1.17.3/go.mod h1:z83AkoZ7mZwBGT3yTnt6rSGI1OOsHSIi6a5M3mJ8NaU= -cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= -cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154= -cloud.google.com/go/binaryauthorization v1.7.1/go.mod h1:GTAyfRWYgcbsP3NJogpV3yeunbUIjx2T9xVeYovtURE= -cloud.google.com/go/binaryauthorization v1.7.2/go.mod h1:kFK5fQtxEp97m92ziy+hbu+uKocka1qRRL8MVJIgjv0= -cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod 
h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= -cloud.google.com/go/certificatemanager v1.7.2/go.mod h1:15SYTDQMd00kdoW0+XY5d9e+JbOPjp24AvF48D8BbcQ= -cloud.google.com/go/certificatemanager v1.7.3/go.mod h1:T/sZYuC30PTag0TLo28VedIRIj1KPGcOQzjWAptHa00= -cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= -cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0= -cloud.google.com/go/channel v1.17.1/go.mod h1:xqfzcOZAcP4b/hUDH0GkGg1Sd5to6di1HOJn/pi5uBQ= -cloud.google.com/go/channel v1.17.2/go.mod h1:aT2LhnftnyfQceFql5I/mP8mIbiiJS4lWqgXA815zMk= -cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/cloudbuild v1.13.0/go.mod 
h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/cloudbuild v1.14.1/go.mod h1:K7wGc/3zfvmYWOWwYTgF/d/UVJhS4pu+HAy7PL7mCsU= -cloud.google.com/go/cloudbuild v1.14.2/go.mod h1:Bn6RO0mBYk8Vlrt+8NLrru7WXlQ9/RDWz2uo5KG1/sg= -cloud.google.com/go/cloudbuild v1.14.3/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= -cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM= -cloud.google.com/go/clouddms v1.7.1/go.mod h1:o4SR8U95+P7gZ/TX+YbJxehOCsM+fe6/brlrFquiszk= -cloud.google.com/go/clouddms v1.7.2/go.mod h1:Rk32TmWmHo64XqDvW7jgkFQet1tUKNVzs7oajtJT3jU= -cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= -cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= -cloud.google.com/go/cloudtasks v1.12.2/go.mod 
h1:A7nYkjNlW2gUoROg1kvJrQGhJP/38UaWwsnuBDOBVUk= -cloud.google.com/go/cloudtasks v1.12.3/go.mod h1:GPVXhIOSGEaR+3xT4Fp72ScI+HjHffSS4B8+BaBB5Ys= -cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= -cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= 
-cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= -cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= -cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= -cloud.google.com/go/contactcenterinsights v1.11.0/go.mod h1:hutBdImE4XNZ1NV4vbPJKSFOnQruhC5Lj9bZqWMTKiU= -cloud.google.com/go/contactcenterinsights v1.11.1/go.mod h1:FeNP3Kg8iteKM80lMwSk3zZZKVxr+PGnAId6soKuXwE= -cloud.google.com/go/contactcenterinsights v1.11.2/go.mod h1:A9PIR5ov5cRcd28KlDbmmXE8Aay+Gccer2h4wzkYFso= -cloud.google.com/go/contactcenterinsights v1.11.3/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= 
-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= -cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= -cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4= -cloud.google.com/go/container v1.26.1/go.mod h1:5smONjPRUxeEpDG7bMKWfDL4sauswqEtnBK1/KKpR04= -cloud.google.com/go/container v1.26.2/go.mod h1:YlO84xCt5xupVbLaMY4s3XNE79MUJ+49VmkInr6HvF4= -cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= -cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U= -cloud.google.com/go/containeranalysis v0.11.1/go.mod h1:rYlUOM7nem1OJMKwE1SadufX0JP3wnXj844EtZAwWLY= -cloud.google.com/go/containeranalysis v0.11.2/go.mod h1:xibioGBC1MD2j4reTyV1xY1/MvKaz+fyM9ENWhmIeP8= -cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod 
h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= -cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= -cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= -cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= -cloud.google.com/go/datacatalog v1.18.0/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= -cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A= -cloud.google.com/go/datacatalog v1.18.2/go.mod h1:SPVgWW2WEMuWHA+fHodYjmxPiMqcOiWfhc9OD5msigk= -cloud.google.com/go/datacatalog v1.18.3/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= -cloud.google.com/go/dataflow v0.9.2/go.mod h1:vBfdBZ/ejlTaYIGB3zB4T08UshH70vbtZeMD+urnUSo= -cloud.google.com/go/dataflow v0.9.3/go.mod h1:HI4kMVjcHGTs3jTHW/kv3501YW+eloiJSLxkJa/vqFE= -cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod 
h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= -cloud.google.com/go/dataform v0.8.2/go.mod h1:X9RIqDs6NbGPLR80tnYoPNiO1w0wenKTb8PxxlhTMKM= -cloud.google.com/go/dataform v0.8.3/go.mod h1:8nI/tvv5Fso0drO3pEjtowz58lodx8MVkdV2q0aPlqg= -cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= -cloud.google.com/go/datafusion v1.7.2/go.mod h1:62K2NEC6DRlpNmI43WHMWf9Vg/YvN6QVi8EVwifElI0= -cloud.google.com/go/datafusion v1.7.3/go.mod h1:eoLt1uFXKGBq48jy9LZ+Is8EAVLnmn50lNncLzwYokE= -cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= -cloud.google.com/go/datalabeling v0.8.2/go.mod h1:cyDvGHuJWu9U/cLDA7d8sb9a0tWLEletStu2sTmg3BE= -cloud.google.com/go/datalabeling v0.8.3/go.mod h1:tvPhpGyS/V7lqjmb3V0TaDdGvhzgR1JoW7G2bpi2UTI= -cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= -cloud.google.com/go/dataplex v1.3.0/go.mod 
h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataplex v1.10.1/go.mod h1:1MzmBv8FvjYfc7vDdxhnLFNskikkB+3vl475/XdCDhs= -cloud.google.com/go/dataplex v1.10.2/go.mod h1:xdC8URdTrCrZMW6keY779ZT1cTOfV8KEPNsw+LTRT1Y= -cloud.google.com/go/dataplex v1.11.1/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= -cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY= -cloud.google.com/go/dataproc/v2 v2.2.1/go.mod h1:QdAJLaBjh+l4PVlVZcmrmhGccosY/omC1qwfQ61Zv/o= -cloud.google.com/go/dataproc/v2 v2.2.2/go.mod h1:aocQywVmQVF4i8CL740rNI/ZRpsaaC1Wh2++BJ7HEJ4= -cloud.google.com/go/dataproc/v2 v2.2.3/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= 
-cloud.google.com/go/dataqna v0.8.2/go.mod h1:KNEqgx8TTmUipnQsScOoDpq/VlXVptUqVMZnt30WAPs= -cloud.google.com/go/dataqna v0.8.3/go.mod h1:wXNBW2uvc9e7Gl5k8adyAMnLush1KVV6lZUhB+rqNu4= -cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= -cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= -cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= -cloud.google.com/go/datastream v1.10.1/go.mod h1:7ngSYwnw95YFyTd5tOGBxHlOZiL+OtpjheqU7t2/s/c= -cloud.google.com/go/datastream 
v1.10.2/go.mod h1:W42TFgKAs/om6x/CdXX5E4oiAsKlH+e8MTGy81zdYt0= -cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= -cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= -cloud.google.com/go/deploy v1.13.1/go.mod h1:8jeadyLkH9qu9xgO3hVWw8jVr29N1mnW42gRJT8GY6g= -cloud.google.com/go/deploy v1.14.1/go.mod h1:N8S0b+aIHSEeSr5ORVoC0+/mOPUysVt8ae4QkZYolAw= -cloud.google.com/go/deploy v1.14.2/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= -cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= -cloud.google.com/go/dialogflow v1.43.0/go.mod 
h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= -cloud.google.com/go/dialogflow v1.44.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= -cloud.google.com/go/dialogflow v1.44.1/go.mod h1:n/h+/N2ouKOO+rbe/ZnI186xImpqvCVj2DdsWS/0EAk= -cloud.google.com/go/dialogflow v1.44.2/go.mod h1:QzFYndeJhpVPElnFkUXxdlptx0wPnBWLCBT9BvtC3/c= -cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= -cloud.google.com/go/dlp v1.10.2/go.mod h1:ZbdKIhcnyhILgccwVDzkwqybthh7+MplGC3kZVZsIOQ= -cloud.google.com/go/dlp v1.10.3/go.mod h1:iUaTc/ln8I+QT6Ai5vmuwfw8fqTk2kaz0FvCwhLCom0= -cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= -cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= -cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= -cloud.google.com/go/documentai v1.23.0/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= 
-cloud.google.com/go/documentai v1.23.2/go.mod h1:Q/wcRT+qnuXOpjAkvOV4A+IeQl04q2/ReT7SSbytLSo= -cloud.google.com/go/documentai v1.23.4/go.mod h1:4MYAaEMnADPN1LPN5xboDR5QVB6AgsaxgFdJhitlE2Y= -cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= -cloud.google.com/go/domains v0.9.2/go.mod h1:3YvXGYzZG1Temjbk7EyGCuGGiXHJwVNmwIf+E/cUp5I= -cloud.google.com/go/domains v0.9.3/go.mod h1:29k66YNDLDY9LCFKpGFeh6Nj9r62ZKm5EsUJxAl84KU= -cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= -cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= -cloud.google.com/go/edgecontainer v1.1.2/go.mod h1:wQRjIzqxEs9e9wrtle4hQPSR1Y51kqN75dgF7UllZZ4= -cloud.google.com/go/edgecontainer v1.1.3/go.mod h1:Ll2DtIABzEfaxaVSbwj3QHFaOOovlDFiWVDu349jSsA= -cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= 
-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= -cloud.google.com/go/essentialcontacts v1.6.3/go.mod h1:yiPCD7f2TkP82oJEFXFTou8Jl8L6LBRPeBEkTaO0Ggo= -cloud.google.com/go/essentialcontacts v1.6.4/go.mod h1:iju5Vy3d9tJUg0PYMd1nHhjV7xoCXaOAVabrwLaPBEM= -cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= -cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= -cloud.google.com/go/eventarc v1.13.1/go.mod h1:EqBxmGHFrruIara4FUQ3RHlgfCn7yo1HYsu2Hpt/C3Y= -cloud.google.com/go/eventarc v1.13.2/go.mod h1:X9A80ShVu19fb4e5sc/OLV7mpFUKZMwfJFeeWhcIObM= -cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= -cloud.google.com/go/filestore v1.7.2/go.mod h1:TYOlyJs25f/omgj+vY7/tIG/E7BX369triSPzE4LdgE= -cloud.google.com/go/filestore v1.7.3/go.mod h1:Qp8WaEERR3cSkxToxFPHh/b8AACkSut+4qlCjAmKTV0= 
-cloud.google.com/go/filestore v1.7.4/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= -cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= -cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= -cloud.google.com/go/functions v1.15.2/go.mod h1:CHAjtcR6OU4XF2HuiVeriEdELNcnvRZSk1Q8RMqy4lE= -cloud.google.com/go/functions v1.15.3/go.mod h1:r/AMHwBheapkkySEhiZYLDBwVJCdlRwsm4ieJu35/Ug= -cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod 
h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= -cloud.google.com/go/gkebackup v1.3.1/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= -cloud.google.com/go/gkebackup v1.3.2/go.mod h1:OMZbXzEJloyXMC7gqdSB+EOEQ1AKcpGYvO3s1ec5ixk= -cloud.google.com/go/gkebackup v1.3.3/go.mod h1:eMk7/wVV5P22KBakhQnJxWSVftL1p4VBFLpv0kIft7I= -cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= -cloud.google.com/go/gkeconnect v0.8.2/go.mod h1:6nAVhwchBJYgQCXD2pHBFQNiJNyAd/wyxljpaa6ZPrY= -cloud.google.com/go/gkeconnect v0.8.3/go.mod h1:i9GDTrfzBSUZGCe98qSu1B8YB8qfapT57PenIb820Jo= -cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= 
-cloud.google.com/go/gkehub v0.14.2/go.mod h1:iyjYH23XzAxSdhrbmfoQdePnlMj2EWcvnR+tHdBQsCY= -cloud.google.com/go/gkehub v0.14.3/go.mod h1:jAl6WafkHHW18qgq7kqcrXYzN08hXeK/Va3utN8VKg8= -cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= -cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= -cloud.google.com/go/gkemulticloud v1.0.1/go.mod h1:AcrGoin6VLKT/fwZEYuqvVominLriQBCKmbjtnbMjG8= -cloud.google.com/go/gkemulticloud v1.0.2/go.mod h1:+ee5VXxKb3H1l4LZAcgWB/rvI16VTNTrInWxDjAGsGo= -cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= -cloud.google.com/go/gsuiteaddons v1.6.2/go.mod h1:K65m9XSgs8hTF3X9nNTPi8IQueljSdYo9F+Mi+s4MyU= -cloud.google.com/go/gsuiteaddons v1.6.3/go.mod h1:sCFJkZoMrLZT3JTb8uJqgKPNshH2tfXeCwTFRebTq48= -cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= 
-cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= -cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= -cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= -cloud.google.com/go/iap v1.9.0/go.mod h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY= -cloud.google.com/go/iap v1.9.1/go.mod 
h1:SIAkY7cGMLohLSdBR25BuIxO+I4fXJiL06IBL7cy/5Q= -cloud.google.com/go/iap v1.9.2/go.mod h1:GwDTOs047PPSnwRD0Us5FKf4WDRcVvHg1q9WVkKBhdI= -cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= -cloud.google.com/go/ids v1.4.2/go.mod h1:3vw8DX6YddRu9BncxuzMyWn0g8+ooUjI2gslJ7FH3vk= -cloud.google.com/go/ids v1.4.3/go.mod h1:9CXPqI3GedjmkjbMWCUhMZ2P2N7TUMzAkVXYEH2orYU= -cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= -cloud.google.com/go/iot v1.7.2/go.mod h1:q+0P5zr1wRFpw7/MOgDXrG/HVA+l+cSwdObffkrpnSg= -cloud.google.com/go/iot v1.7.3/go.mod h1:t8itFchkol4VgNbHnIq9lXoOOtHNR3uAACQMYbN9N4I= -cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms 
v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= -cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= -cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= -cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= -cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= -cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= -cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ= -cloud.google.com/go/language v1.11.1/go.mod h1:Xyid9MG9WOX3utvDbpX7j3tXDmmDooMyMDqgUVpH17U= -cloud.google.com/go/language v1.12.1/go.mod h1:zQhalE2QlQIxbKIZt54IASBzmZpN/aDASea5zl1l+J4= -cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= 
-cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= -cloud.google.com/go/lifesciences v0.9.2/go.mod h1:QHEOO4tDzcSAzeJg7s2qwnLM2ji8IRpQl4p6m5Z9yTA= -cloud.google.com/go/lifesciences v0.9.3/go.mod h1:gNGBOJV80IWZdkd+xz4GQj4mbqaz737SCLHn2aRhQKM= -cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= -cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= -cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= -cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= -cloud.google.com/go/managedidentities v1.6.2/go.mod 
h1:5c2VG66eCa0WIq6IylRk3TBW83l161zkFvCj28X7jn8= -cloud.google.com/go/managedidentities v1.6.3/go.mod h1:tewiat9WLyFN0Fi7q1fDD5+0N4VUoL0SCX0OTCthZq4= -cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= -cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= -cloud.google.com/go/maps v1.4.1/go.mod h1:BxSa0BnW1g2U2gNdbq5zikLlHUuHW0GFWh7sgML2kIY= -cloud.google.com/go/maps v1.5.1/go.mod h1:NPMZw1LJwQZYCfz4y+EIw+SI+24A4bpdFJqdKVr0lt4= -cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= -cloud.google.com/go/mediatranslation v0.8.2/go.mod h1:c9pUaDRLkgHRx3irYE5ZC8tfXGrMYwNZdmDqKMSfFp8= -cloud.google.com/go/mediatranslation v0.8.3/go.mod h1:F9OnXTy336rteOEywtY7FOqCk+J43o2RF638hkOQl4Y= -cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod 
h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= -cloud.google.com/go/memcache v1.10.2/go.mod h1:f9ZzJHLBrmd4BkguIAa/l/Vle6uTHzHokdnzSWOdQ6A= -cloud.google.com/go/memcache v1.10.3/go.mod h1:6z89A41MT2DVAW0P4iIRdu5cmRTsbsFn4cyiIx8gbwo= -cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= -cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= -cloud.google.com/go/metastore v1.13.0/go.mod h1:URDhpG6XLeh5K+Glq0NOt74OfrPKTwS62gEPZzb5SOk= -cloud.google.com/go/metastore v1.13.1/go.mod h1:IbF62JLxuZmhItCppcIfzBBfUFq0DIB9HPDoLgWrVOU= -cloud.google.com/go/metastore v1.13.2/go.mod h1:KS59dD+unBji/kFebVp8XU/quNSyo8b6N6tPGspKszA= -cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.15.1/go.mod 
h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= -cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= -cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= -cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc= -cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= -cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= -cloud.google.com/go/networkconnectivity v1.14.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= -cloud.google.com/go/networkconnectivity v1.14.1/go.mod h1:LyGPXR742uQcDxZ/wv4EI0Vu5N6NKJ77ZYVnDe69Zug= -cloud.google.com/go/networkconnectivity v1.14.2/go.mod h1:5UFlwIisZylSkGG1AdwK/WZUaoz12PKu6wODwIbFzJo= -cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= 
-cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= -cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw= -cloud.google.com/go/networkmanagement v1.9.1/go.mod h1:CCSYgrQQvW73EJawO2QamemYcOb57LvrDdDU51F0mcI= -cloud.google.com/go/networkmanagement v1.9.2/go.mod h1:iDGvGzAoYRghhp4j2Cji7sF899GnfGQcQRQwgVOWnDw= -cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= -cloud.google.com/go/networksecurity v0.9.2/go.mod h1:jG0SeAttWzPMUILEHDUvFYdQTl8L/E/KC8iZDj85lEI= -cloud.google.com/go/networksecurity v0.9.3/go.mod h1:l+C0ynM6P+KV9YjOnx+kk5IZqMSLccdBqW6GUoF4p/0= -cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= -cloud.google.com/go/notebooks v1.10.0/go.mod 
h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k= -cloud.google.com/go/notebooks v1.10.1/go.mod h1:5PdJc2SgAybE76kFQCWrTfJolCOUQXF97e+gteUUA6A= -cloud.google.com/go/notebooks v1.11.1/go.mod h1:V2Zkv8wX9kDCGRJqYoI+bQAaoVeE5kSiz4yYHd2yJwQ= -cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= -cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU= -cloud.google.com/go/optimization v1.5.1/go.mod h1:NC0gnUD5MWVAF7XLdoYVPmYYVth93Q6BUzqAq3ZwtV8= -cloud.google.com/go/optimization v1.6.1/go.mod h1:hH2RYPTTM9e9zOiTaYPTiGPcGdNZVnBSBxjIAJzUkqo= -cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= -cloud.google.com/go/orchestration v1.8.2/go.mod h1:T1cP+6WyTmh6LSZzeUhvGf0uZVmJyTx7t8z7Vg87+A0= -cloud.google.com/go/orchestration v1.8.3/go.mod h1:xhgWAYqlbYjlz2ftbFghdyqENYW+JXuhBx9KsjMoGHs= -cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy 
v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= -cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= -cloud.google.com/go/orgpolicy v1.11.2/go.mod h1:biRDpNwfyytYnmCRWZWxrKF22Nkz9eNVj9zyaBdpm1o= -cloud.google.com/go/orgpolicy v1.11.3/go.mod h1:oKAtJ/gkMjum5icv2aujkP4CxROxPXsBbYGCDbPO8MM= -cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= -cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= -cloud.google.com/go/osconfig v1.12.2/go.mod h1:eh9GPaMZpI6mEJEuhEjUJmaxvQ3gav+fFEJon1Y8Iw0= -cloud.google.com/go/osconfig v1.12.3/go.mod h1:L/fPS8LL6bEYUi1au832WtMnPeQNT94Zo3FwwV1/xGM= -cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/oslogin v1.10.1/go.mod 
h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= -cloud.google.com/go/oslogin v1.11.0/go.mod h1:8GMTJs4X2nOAUVJiPGqIWVcDaF0eniEto3xlOxaboXE= -cloud.google.com/go/oslogin v1.11.1/go.mod h1:OhD2icArCVNUxKqtK0mcSmKL7lgr0LVlQz+v9s1ujTg= -cloud.google.com/go/oslogin v1.12.1/go.mod h1:VfwTeFJGbnakxAY236eN8fsnglLiVXndlbcNomY4iZU= -cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= -cloud.google.com/go/phishingprotection v0.8.2/go.mod h1:LhJ91uyVHEYKSKcMGhOa14zMMWfbEdxG032oT6ECbC8= -cloud.google.com/go/phishingprotection v0.8.3/go.mod h1:3B01yO7T2Ra/TMojifn8EoGd4G9jts/6cIO0DgDY9J8= -cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= -cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= -cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64= -cloud.google.com/go/policytroubleshooter v1.9.1/go.mod h1:MYI8i0bCrL8cW+VHN1PoiBTyNZTstCg2WUw2eVC4c4U= 
-cloud.google.com/go/policytroubleshooter v1.10.1/go.mod h1:5C0rhT3TDZVxAu8813bwmTvd57Phbl8mr9F4ipOsxEs= -cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= -cloud.google.com/go/privatecatalog v0.9.2/go.mod h1:RMA4ATa8IXfzvjrhhK8J6H4wwcztab+oZph3c6WmtFc= -cloud.google.com/go/privatecatalog v0.9.3/go.mod h1:K5pn2GrVmOPjXz3T26mzwXLcKivfIJ9R5N79AFCF9UE= -cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= -cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= 
-cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.0/go.mod h1:QuE8EdU9dEnesG8/kG3XuJyNsjEqMlMzg3v3scCJ46c= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.1/go.mod h1:JZYZJOeZjgSSTGP4uz7NlQ4/d1w5hGmksVgM0lbEij0= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.2/go.mod h1:kpaDBOpkwD4G0GVMzG1W6Doy1tFFC97XAV3xy+Rd/pw= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.3/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= 
-cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= -cloud.google.com/go/recommendationengine v0.8.2/go.mod h1:QIybYHPK58qir9CV2ix/re/M//Ty10OxjnnhWdaKS1Y= -cloud.google.com/go/recommendationengine v0.8.3/go.mod h1:m3b0RZV02BnODE9FeSvGv1qibFo8g0OnmB/RMwYy4V8= -cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= -cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII= -cloud.google.com/go/recommender v1.11.1/go.mod h1:sGwFFAyI57v2Hc5LbIj+lTwXipGu9NW015rkaEM5B18= -cloud.google.com/go/recommender v1.11.2/go.mod h1:AeoJuzOvFR/emIcXdVFkspVXVTYpliRCmKNYDnyBv6Y= -cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= -cloud.google.com/go/redis v1.13.2/go.mod h1:0Hg7pCMXS9uz02q+LoEVl5dNHUkIQv+C/3L76fandSA= 
-cloud.google.com/go/redis v1.13.3/go.mod h1:vbUpCKUAZSYzFcWKmICnYgRAhTFg9r+djWqFxDYXi4U= -cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= -cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= -cloud.google.com/go/resourcemanager v1.9.2/go.mod h1:OujkBg1UZg5lX2yIyMo5Vz9O5hf7XQOSV7WxqxxMtQE= -cloud.google.com/go/resourcemanager v1.9.3/go.mod h1:IqrY+g0ZgLsihcfcmqSe+RKp1hzjXwG904B92AwBz6U= -cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= -cloud.google.com/go/resourcesettings v1.6.2/go.mod h1:mJIEDd9MobzunWMeniaMp6tzg4I2GvD3TTmPkc8vBXk= -cloud.google.com/go/resourcesettings v1.6.3/go.mod h1:pno5D+7oDYkMWZ5BpPsb4SO0ewg3IXcmmrUZaMJrFic= -cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod 
h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= -cloud.google.com/go/retail v1.14.2/go.mod h1:W7rrNRChAEChX336QF7bnMxbsjugcOCPU44i5kbLiL8= -cloud.google.com/go/retail v1.14.3/go.mod h1:Omz2akDHeSlfCq8ArPKiBxlnRpKEBjUH386JYFLUvXo= -cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= -cloud.google.com/go/run v1.3.0/go.mod h1:S/osX/4jIPZGg+ssuqh6GNgg7syixKe3YnprwehzHKU= -cloud.google.com/go/run v1.3.1/go.mod h1:cymddtZOzdwLIAsmS6s+Asl4JoXIDm/K1cpZTxV4Q5s= -cloud.google.com/go/run v1.3.2/go.mod h1:SIhmqArbjdU/D9M6JoHaAqnAMKLFtXaVdNeq04NjnVE= -cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/scheduler v1.10.1/go.mod 
h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= -cloud.google.com/go/scheduler v1.10.2/go.mod h1:O3jX6HRH5eKCA3FutMw375XHZJudNIKVonSCHv7ropY= -cloud.google.com/go/scheduler v1.10.3/go.mod h1:8ANskEM33+sIbpJ+R4xRfw/jzOG+ZFE8WVLy7/yGvbc= -cloud.google.com/go/scheduler v1.10.4/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= -cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss= -cloud.google.com/go/secretmanager v1.11.3/go.mod h1:0bA2o6FabmShrEy328i67aV+65XoUFFSmVeLBn/51jI= -cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= -cloud.google.com/go/security v1.15.2/go.mod h1:2GVE/v1oixIRHDaClVbHuPcZwAqFM28mXuAKCfMgYIg= -cloud.google.com/go/security v1.15.3/go.mod 
h1:gQ/7Q2JYUZZgOzqKtw9McShH+MjNvtDpL40J1cT+vBs= -cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= -cloud.google.com/go/securitycenter v1.23.1/go.mod h1:w2HV3Mv/yKhbXKwOCu2i8bCuLtNP1IMHuiYQn4HJq5s= -cloud.google.com/go/securitycenter v1.24.1/go.mod h1:3h9IdjjHhVMXdQnmqzVnM7b0wMn/1O/U20eWVpMpZjI= -cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= -cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= -cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod 
h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= -cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= -cloud.google.com/go/servicedirectory v1.11.1/go.mod h1:tJywXimEWzNzw9FvtNjsQxxJ3/41jseeILgwU/QLrGI= -cloud.google.com/go/servicedirectory v1.11.2/go.mod h1:KD9hCLhncWRV5jJphwIpugKwM5bn1x0GyVVD4NO8mGg= -cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= -cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= -cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= -cloud.google.com/go/shell v1.7.2/go.mod h1:KqRPKwBV0UyLickMn0+BY1qIyE98kKyI216sH/TuHmc= 
-cloud.google.com/go/shell v1.7.3/go.mod h1:cTTEz/JdaBsQAeTQ3B6HHldZudFoYBOqjteev07FbIc= -cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= -cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= -cloud.google.com/go/spanner v1.50.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= -cloud.google.com/go/spanner v1.51.0/go.mod h1:c5KNo5LQ1X5tJwma9rSQZsXNBDNvj4/n8BVc3LNahq0= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= -cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= -cloud.google.com/go/speech v1.19.1/go.mod h1:WcuaWz/3hOlzPFOVo9DUsblMIHwxP589y6ZMtaG+iAA= -cloud.google.com/go/speech v1.19.2/go.mod h1:2OYFfj+Ch5LWjsaSINuCZsre/789zlcCI3SY4oAi2oI= -cloud.google.com/go/speech v1.20.1/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= -cloud.google.com/go/storagetransfer v1.10.1/go.mod h1:rS7Sy0BtPviWYTTJVWCSV4QrbBitgPeuK4/FKa4IdLs= -cloud.google.com/go/storagetransfer v1.10.2/go.mod h1:meIhYQup5rg9juQJdyppnA/WLQCOguxtk1pr3/vBWzA= -cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod 
h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= -cloud.google.com/go/talent v1.6.3/go.mod h1:xoDO97Qd4AK43rGjJvyBHMskiEf3KulgYzcH6YWOVoo= -cloud.google.com/go/talent v1.6.4/go.mod h1:QsWvi5eKeh6gG2DlBkpMaFYZYrYUnIpo34f6/V5QykY= -cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= -cloud.google.com/go/texttospeech v1.7.2/go.mod h1:VYPT6aTOEl3herQjFHYErTlSZJ4vB00Q2ZTmuVgluD4= -cloud.google.com/go/texttospeech v1.7.3/go.mod h1:Av/zpkcgWfXlDLRYob17lqMstGZ3GqlvJXqKMp2u8so= -cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= -cloud.google.com/go/tpu v1.6.2/go.mod h1:NXh3NDwt71TsPZdtGWgAG5ThDfGd32X1mJ2cMaRlVgU= -cloud.google.com/go/tpu v1.6.3/go.mod h1:lxiueqfVMlSToZY1151IaZqp89ELPSrk+3HIQ5HRkbY= -cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= -cloud.google.com/go/trace v1.3.0/go.mod 
h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= -cloud.google.com/go/trace v1.10.2/go.mod h1:NPXemMi6MToRFcSxRl2uDnu/qAlAQ3oULUphcHGh1vA= -cloud.google.com/go/trace v1.10.3/go.mod h1:Ke1bgfc73RV3wUFml+uQp7EsDw4dGaETLxB7Iq/r4CY= -cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= -cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/translate v1.9.1/go.mod h1:TWIgDZknq2+JD4iRcojgeDtqGEp154HN/uL6hMvylS8= -cloud.google.com/go/translate v1.9.2/go.mod h1:E3Tc6rUTsQkVrXW6avbUhKJSr7ZE3j7zNmqzXKHqRrY= -cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= -cloud.google.com/go/video 
v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= -cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= -cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= -cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM= -cloud.google.com/go/video v1.20.1/go.mod h1:3gJS+iDprnj8SY6pe0SwLeC5BUW80NjhwX7INWEuWGU= -cloud.google.com/go/video v1.20.2/go.mod h1:lrixr5JeKNThsgfM9gqtwb6Okuqzfo4VrY2xynaViTA= -cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= -cloud.google.com/go/videointelligence v1.11.2/go.mod h1:ocfIGYtIVmIcWk1DsSGOoDiXca4vaZQII1C85qtoplc= -cloud.google.com/go/videointelligence v1.11.3/go.mod h1:tf0NUaGTjU1iS2KEkGWvO5hRHeCkFK3nPo0/cOZhZAo= -cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= 
-cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= -cloud.google.com/go/vision/v2 v2.7.3/go.mod h1:V0IcLCY7W+hpMKXK1JYE0LV5llEqVmj+UJChjvA1WsM= -cloud.google.com/go/vision/v2 v2.7.4/go.mod h1:ynDKnsDN/0RtqkKxQZ2iatv3Dm9O+HfRb5djl7l4Vvw= -cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= -cloud.google.com/go/vmmigration v1.7.2/go.mod h1:iA2hVj22sm2LLYXGPT1pB63mXHhrH1m/ruux9TwWLd8= -cloud.google.com/go/vmmigration v1.7.3/go.mod h1:ZCQC7cENwmSWlwyTrZcWivchn78YnFniEQYRWQ65tBo= -cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= -cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= 
-cloud.google.com/go/vmwareengine v1.0.1/go.mod h1:aT3Xsm5sNx0QShk1Jc1B8OddrxAScYLwzVoaiXfdzzk= -cloud.google.com/go/vmwareengine v1.0.2/go.mod h1:xMSNjIk8/itYrz1JA8nV3Ajg4L4n3N+ugP8JKzk3OaA= -cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= -cloud.google.com/go/vpcaccess v1.7.2/go.mod h1:mmg/MnRHv+3e8FJUjeSibVFvQF1cCy2MsFaFqxeY1HU= -cloud.google.com/go/vpcaccess v1.7.3/go.mod h1:YX4skyfW3NC8vI3Fk+EegJnlYFatA+dXK4o236EUCUc= -cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= -cloud.google.com/go/webrisk v1.9.2/go.mod h1:pY9kfDgAqxUpDBOrG4w8deLfhvJmejKB0qd/5uQIPBc= -cloud.google.com/go/webrisk v1.9.3/go.mod h1:RUYXe9X/wBDXhVilss7EDLW9ZNa06aowPuinUOPCXH8= -cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= 
-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= -cloud.google.com/go/websecurityscanner v1.6.2/go.mod h1:7YgjuU5tun7Eg2kpKgGnDuEOXWIrh8x8lWrJT4zfmas= -cloud.google.com/go/websecurityscanner v1.6.3/go.mod h1:x9XANObUFR+83Cya3g/B9M/yoHVqzxPnFtgF8yYGAXw= -cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= -cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= -cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM= -cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc= -cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/JohnCGriffin/overflow 
v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= -github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= -github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= -github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 
-github.com/aws/aws-sdk-go v1.51.21 h1:UrT6JC9R9PkYYXDZBV0qDKTualMr+bfK2eboTknMgbs= -github.com/aws/aws-sdk-go v1.51.21/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.54.6 h1:HEYUib3yTt8E6vxjMWM3yAq5b+qjj/6aKA62mkgux9g= +github.com/aws/aws-sdk-go v1.54.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.91.0 h1:L7IR+86qrZuEMSjGFg4cwRwtHqC8uCPmMUkP7BD4CPw= -github.com/cloudflare/cloudflare-go v0.91.0/go.mod 
h1:nUqvBUUDRxNzsDSQjbqUNWHEIYAoUlgRmcAzMKlFdKs= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cloudflare/cloudflare-go v0.97.0 h1:feZRGiRF1EbljnNIYdt8014FnOLtC3CCvgkLXu915ks= +github.com/cloudflare/cloudflare-go v0.97.0/go.mod h1:JXRwuTfHpe5xFg8xytc2w0XC6LcrFsBVMS4WlVaiGg8= github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1205,214 +30,59 @@ github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf h1:K5VXW9LjmJv/xh github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf/go.mod h1:bXVurdTuvOiJu7NHALemFe0JMvC2UmwYHW+7fcZaZ2M= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod 
h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= -github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= +github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gormigrate/gormigrate/v2 
v2.1.2 h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY= -github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= -github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= -github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= 
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/gotd/contrib v0.19.0 h1:O6GvMrRVeFslIHLUcpaHVzcl9/5PcgR2jQTIIeTyds0= -github.com/gotd/contrib v0.19.0/go.mod h1:LzPxzRF0FvtpBt/WyODWQnPpk0tm/G9z6RHUoPqMakU= +github.com/gorilla/websocket v1.5.2 h1:qoW6V1GT3aZxybsbC6oLnailWnB+qTMVwMreOso9XUw= +github.com/gorilla/websocket v1.5.2/go.mod h1:0n9H61RBAcf5/38py2MCYbxzPIY9rOkpvvMT24Rqs30= +github.com/gotd/contrib v0.20.0 h1:1Wc4+HMQiIKYQuGHVwVksIx152HFTP6B5n88dDe0ZYw= +github.com/gotd/contrib v0.20.0/go.mod h1:P6o8W4niqhDPHLA0U+SA/L7l3BQHYLULpeHfRSePn9o= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-retryablehttp 
v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -1425,37 +95,24 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork 
v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.2/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -1463,99 +120,57 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod 
h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= -github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 
h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.69 h1:l8AnsQFyY1xiwa/DaQskY4NXSLA2yrGsW5iD9nRPVS0= -github.com/minio/minio-go/v7 v7.0.69/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/minio/minio-go/v7 v7.0.72 h1:ZSbxs2BfJensLyHdVOgHv+pfmvxYraaUy07ER04dWnA= +github.com/minio/minio-go/v7 v7.0.72/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi 
v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= 
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod 
h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= -github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -1563,19 +178,9 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1583,15 +188,7 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= gitlab.com/NebulousLabs/bolt v1.4.4 h1:3UhpR2qtHs87dJBE3CIzhw48GYSoUUNByJmic0cbu1w= gitlab.com/NebulousLabs/bolt v1.4.4/go.mod h1:ZL02cwhpLNif6aruxvUMqu/Bdy0/lFY21jMFfNAA+O8= gitlab.com/NebulousLabs/demotemutex v0.0.0-20151003192217-235395f71c40 h1:IbucNi8u1a1ErgVFVgg8pERhSyzYe5l+o8krDMnNjWA= @@ -1628,37 +225,26 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 h1:owERlK gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1:vIutAvl7lmJqLVYTCBY5WDdJomP+V74At8LCeEYoH8w= gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.sia.tech/core v0.2.2 h1:33RJrt08o7KyUOY4tITH6ECmRq1lhtapqc/SncIF/2A= -go.sia.tech/core v0.2.2/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= -go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= -go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= -go.sia.tech/gofakes3 v0.0.2 h1:oWnsYjHvSyf4ddtEH6XO76xeXC10N2cRqduI6B0d/EU= -go.sia.tech/gofakes3 v0.0.2/go.mod h1:+NLzpsL6M0WJvdRGL3q7SWo9O1DdBaBrPGm++Ue9WHo= -go.sia.tech/hostd v1.0.4 h1:rFzuNJ7sSFQfdrTHKSNYyMX+wlHyei/vZcVbXmrUl6I= -go.sia.tech/hostd v1.0.4/go.mod h1:s1W4/Okfcs2rGM3sC7xL95HY+I/oJ0Dsix3zTER+hpQ= -go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= -go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= 
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.sia.tech/core v0.3.0 h1:PDfAQh9z8PYD+oeVS7rS9SEnTMOZzwwFfAH45yktmko= +go.sia.tech/core v0.3.0/go.mod h1:BMgT/reXtgv6XbDgUYTCPY7wSMbspDRDs7KMi1vL6Iw= +go.sia.tech/coreutils v0.1.0 h1:WQL7iT+jK1BiMx87bASXrZJZf4N2fbQkIOW8rS7wkh4= +go.sia.tech/coreutils v0.1.0/go.mod h1:ybaFgewKXrlxFW71LqsyQlxjG6yWL6BSePrbZYnrprU= +go.sia.tech/gofakes3 v0.0.4 h1:Kvo8j5cVdJRBXvV1KBJ69bocY23twG8ao/HCdwuPMeI= +go.sia.tech/gofakes3 v0.0.4/go.mod h1:6hh4lETCMbyFFNWp3FRE838geY6vh1Aeas7LtYDpQdc= +go.sia.tech/hostd v1.1.1-beta.1.0.20240618072747-b3f430b4d272 h1:RJmZ1Y9PoqpHjYHT5nr6Vmo6tTUpB2AIyd8zFge2JAs= +go.sia.tech/hostd v1.1.1-beta.1.0.20240618072747-b3f430b4d272/go.mod h1:bM0ldLiCPAQenZcczN5I6Iw43iNcCTQqK3aLZlAQ/rc= +go.sia.tech/jape v0.11.2-0.20240306154058-9832414a5385 h1:Gho1g6pkv56o6Ut9cez/Yu5o4xlA8WNkDbPn6RWXL7g= +go.sia.tech/jape v0.11.2-0.20240306154058-9832414a5385/go.mod h1:wU+h6Wh5olDjkPXjF0tbZ1GDgoZ6VTi4naFw91yyWC4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQfwSM5pNU9aGtRZme29q3O4= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= -go.sia.tech/web v0.0.0-20240422221546-c1709d16b6ef h1:X0Xm9AQYHhdd85yi9gqkkCZMb9/WtLwC0nDgv65N90Y= -go.sia.tech/web v0.0.0-20240422221546-c1709d16b6ef/go.mod h1:nGEhGmI8zV/BcC3LOCC5JLVYpidNYJIvLGIqVRWQBCg= -go.sia.tech/web/renterd v0.51.2 h1:+TioJYm4eo5MnMbtDDCUGgGmUkOe+mIk1cbl/9EkAYo= -go.sia.tech/web/renterd v0.51.2/go.mod h1:SWwKoAJvLxiHjTXsNPKX3RLiQzJb/vxwcpku3F78MO8= +go.sia.tech/web v0.0.0-20240610131903-5611d44a533e h1:oKDz6rUExM4a4o6n/EXDppsEka2y/+/PgFOZmHWQRSI= +go.sia.tech/web v0.0.0-20240610131903-5611d44a533e/go.mod 
h1:4nyDlycPKxTlCqvOeRO0wUfXxyzWCEE7+2BRrdNqvWk= +go.sia.tech/web/renterd v0.55.0 h1:xjHF0TudolsrQbguNR6+J/OPeXVf+ekodVtLB3y/dyU= +go.sia.tech/web/renterd v0.55.0/go.mod h1:SWwKoAJvLxiHjTXsNPKX3RLiQzJb/vxwcpku3F78MO8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -1675,229 +261,45 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0 
h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= -golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync 
v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1905,576 +307,59 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210421210424-b80969c67360/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.21.0 
h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.16.0 
h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= -golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= 
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api 
v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod 
h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod 
h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= -google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= -google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= -google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= -google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= -google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto 
v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod 
h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= 
-google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod 
h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto 
v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= 
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= -google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod 
h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod 
h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= -google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc 
v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= -google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -2482,80 +367,18 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.5.6 h1:Ld4mkIickM+EliaQZQx3uOJDJHtrd70MxAUqWqlx3Y8= -gorm.io/driver/mysql v1.5.6/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= -gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= -gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= +gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= +gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= 
+gorm.io/driver/sqlite v1.5.6 h1:fO/X46qn5NUEEOZtnjJRWRzZMe8nqJiQ9E+0hi+hKQE= +gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4= gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8= -gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= +gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= 
-modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= -modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= -modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= -modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= -modernc.org/libc v1.20.3/go.mod 
h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= -modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= -modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= moul.io/zapgorm2 v1.3.0 h1:+CzUTMIcnafd0d/BvBce8T4uPn6DQnpIrz64cyixlkk= 
moul.io/zapgorm2 v1.3.0/go.mod h1:nPVy6U9goFKHR4s+zfSo1xVFaoU7Qgd5DoCdOfzoCqs= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/internal/bus/forex.go b/internal/bus/forex.go new file mode 100644 index 000000000..b6544b911 --- /dev/null +++ b/internal/bus/forex.go @@ -0,0 +1,51 @@ +package bus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" +) + +type ( + client struct { + url string + } +) + +func NewForexClient(url string) *client { + return &client{url: url} +} + +func (f *client) SiacoinExchangeRate(ctx context.Context, currency string) (rate float64, err error) { + // create request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/%s", f.url, currency), http.NoBody) + if err != nil { + return 0, fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Accept", "application/json") + + // create http client + resp, err := http.DefaultClient.Do(req) + if err != nil { + return 0, fmt.Errorf("failed to send request: %w", err) + } + defer resp.Body.Close() + + // check status code + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + var errorMessage string + if err := json.NewDecoder(io.LimitReader(resp.Body, 1024)).Decode(&errorMessage); err != nil { + return 0, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + return 0, errors.New(errorMessage) + } + + // decode exchange rate + if err 
:= json.NewDecoder(resp.Body).Decode(&rate); err != nil { + return 0, fmt.Errorf("failed to decode response: %w", err) + } + return +} diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go new file mode 100644 index 000000000..02e4df79b --- /dev/null +++ b/internal/bus/pinmanager.go @@ -0,0 +1,438 @@ +package bus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "time" + + "github.com/montanaflynn/stats" + "github.com/shopspring/decimal" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/webhooks" + "go.uber.org/zap" +) + +type ( + // An AutopilotStore stores autopilots. + AutopilotStore interface { + Autopilot(ctx context.Context, id string) (api.Autopilot, error) + UpdateAutopilot(ctx context.Context, ap api.Autopilot) error + } + + // PinManager is a service that manages price pinning. + PinManager interface { + Close(context.Context) error + Run(context.Context) error + TriggerUpdate() + } + + // A SettingStore stores settings. 
+ SettingStore interface { + Setting(ctx context.Context, key string) (string, error) + UpdateSetting(ctx context.Context, key, value string) error + } +) + +type ( + pinManager struct { + as AutopilotStore + ss SettingStore + broadcaster webhooks.Broadcaster + + updateInterval time.Duration + rateWindow time.Duration + + triggerChan chan struct{} + closedChan chan struct{} + wg sync.WaitGroup + + logger *zap.SugaredLogger + + mu sync.Mutex + rates []float64 + ratesCurrency string + } +) + +func NewPinManager(broadcaster webhooks.Broadcaster, as AutopilotStore, ss SettingStore, updateInterval, rateWindow time.Duration, l *zap.Logger) *pinManager { + return &pinManager{ + as: as, + ss: ss, + broadcaster: broadcaster, + + logger: l.Sugar().Named("pricemanager"), + + updateInterval: updateInterval, + rateWindow: rateWindow, + + triggerChan: make(chan struct{}, 1), + closedChan: make(chan struct{}), + } +} + +func (pm *pinManager) Close(ctx context.Context) error { + close(pm.closedChan) + + doneChan := make(chan struct{}) + go func() { + pm.wg.Wait() + close(doneChan) + }() + + select { + case <-doneChan: + return nil + case <-ctx.Done(): + return context.Cause(ctx) + } +} + +func (pm *pinManager) Run(ctx context.Context) error { + // try to update prices + if err := pm.updatePrices(ctx, true); err != nil { + return err + } + + // start the update loop + pm.wg.Add(1) + go func() { + defer pm.wg.Done() + + t := time.NewTicker(pm.updateInterval) + defer t.Stop() + + var forced bool + for { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + err := pm.updatePrices(ctx, forced) + if err != nil { + pm.logger.Warn("failed to update prices", zap.Error(err)) + } + cancel() + + forced = false + select { + case <-pm.closedChan: + return + case <-pm.triggerChan: + forced = true + case <-t.C: + } + } + }() + + return nil +} + +func (pm *pinManager) TriggerUpdate() { + select { + case pm.triggerChan <- struct{}{}: + default: + } +} + +func (pm *pinManager) 
averageRate() decimal.Decimal { + pm.mu.Lock() + defer pm.mu.Unlock() + + median, _ := stats.Median(pm.rates) + return decimal.NewFromFloat(median) +} + +func (pm *pinManager) pinnedSettings(ctx context.Context) (api.PricePinSettings, error) { + var ps api.PricePinSettings + if pss, err := pm.ss.Setting(ctx, api.SettingPricePinning); err != nil { + return api.PricePinSettings{}, err + } else if err := json.Unmarshal([]byte(pss), &ps); err != nil { + pm.logger.Panicf("failed to unmarshal pinned settings '%s': %v", pss, err) + } + return ps, nil +} + +func (pm *pinManager) rateExceedsThreshold(threshold float64) bool { + pm.mu.Lock() + defer pm.mu.Unlock() + + // calculate mean + mean, err := stats.Mean(pm.rates) + if err != nil { + pm.logger.Warnw("failed to calculate average rate", zap.Error(err)) + return false + } + + // convert to decimals + avg := decimal.NewFromFloat(mean) + pct := decimal.NewFromFloat(threshold) + cur := decimal.NewFromFloat(pm.rates[len(pm.rates)-1]) + + // calculate whether the current rate exceeds the given threshold + delta := cur.Sub(avg).Abs() + exceeded := delta.GreaterThan(cur.Mul(pct)) + + // log the result + pm.logger.Debugw("rate exceeds threshold", + "last", cur, + "average", avg, + "percentage", threshold, + "delta", delta, + "threshold", cur.Mul(pct), + "exceeded", exceeded, + ) + return exceeded +} + +func (pm *pinManager) updateAutopilotSettings(ctx context.Context, autopilotID string, pins api.AutopilotPins, rate decimal.Decimal) error { + var updated bool + + ap, err := pm.as.Autopilot(ctx, autopilotID) + if err != nil { + return err + } + + // update allowance + if pins.Allowance.IsPinned() { + update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.Allowance.Value), rate) + if err != nil { + pm.logger.Warnw("failed to convert allowance to currency", zap.Error(err)) + } else { + bkp := ap.Config.Contracts.Allowance + ap.Config.Contracts.Allowance = update + if err := ap.Config.Validate(); err != nil { + 
pm.logger.Warnw("failed to update autopilot setting, new allowance makes the setting invalid", zap.Error(err)) + ap.Config.Contracts.Allowance = bkp + } else { + pm.logger.Infow("updating autopilot allowance", "old", bkp, "new", ap.Config.Contracts.Allowance, "rate", rate, "autopilot", autopilotID) + updated = true + } + } + } + + // return early if no updates took place + if !updated { + pm.logger.Infow("autopilots did not require price update", "rate", rate) + return nil + } + + // validate config + err = ap.Config.Validate() + if err != nil { + pm.logger.Warnw("failed to update autopilot setting, new settings make the setting invalid", zap.Error(err)) + return err + } + + // update autopilto + return pm.as.UpdateAutopilot(ctx, ap) +} + +func (pm *pinManager) updateExchangeRates(currency string, rate float64) error { + pm.mu.Lock() + defer pm.mu.Unlock() + + // update last currency + if pm.ratesCurrency != currency { + pm.ratesCurrency = currency + pm.rates = nil + } + + // update last rate + pm.rates = append(pm.rates, rate) + if len(pm.rates) >= int(pm.rateWindow/pm.updateInterval) { + pm.rates = pm.rates[1:] + } + + return nil +} + +func (pm *pinManager) updateGougingSettings(ctx context.Context, pins api.GougingSettingsPins, rate decimal.Decimal) error { + var updated bool + + // fetch gouging settings + var gs api.GougingSettings + if gss, err := pm.ss.Setting(ctx, api.SettingGouging); err != nil { + return err + } else if err := json.Unmarshal([]byte(gss), &gs); err != nil { + pm.logger.Panicf("failed to unmarshal gouging settings '%s': %v", gss, err) + return err + } + + // update max download price + if pins.MaxDownload.IsPinned() { + update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxDownload.Value), rate) + if err != nil { + pm.logger.Warn("failed to convert max download price to currency") + } else if !gs.MaxDownloadPrice.Equals(update) { + bkp := gs.MaxDownloadPrice + gs.MaxDownloadPrice = update + if err := gs.Validate(); err != nil { + 
pm.logger.Warn("failed to update gouging setting, new download price makes the setting invalid", zap.Error(err)) + gs.MaxDownloadPrice = bkp + } else { + pm.logger.Infow("updating max download price", "old", bkp, "new", gs.MaxDownloadPrice, "rate", rate) + updated = true + } + } + } + + // update max RPC price + if pins.MaxRPCPrice.IsPinned() { + update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxRPCPrice.Value), rate) + if err != nil { + pm.logger.Warnw("failed to convert max RPC price to currency", zap.Error(err)) + } else if !gs.MaxRPCPrice.Equals(update) { + bkp := gs.MaxRPCPrice + gs.MaxRPCPrice = update + if err := gs.Validate(); err != nil { + pm.logger.Warnw("failed to update gouging setting, new RPC price makes the setting invalid", zap.Error(err)) + gs.MaxRPCPrice = bkp + } else { + pm.logger.Infow("updating max RPC price", "old", bkp, "new", gs.MaxRPCPrice, "rate", rate) + updated = true + } + } + } + + // update max storage price + if pins.MaxStorage.IsPinned() { + update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxStorage.Value), rate) + if err != nil { + pm.logger.Warnw("failed to convert max storage price to currency", zap.Error(err)) + } else if !gs.MaxStoragePrice.Equals(update) { + bkp := gs.MaxStoragePrice + gs.MaxStoragePrice = update + if err := gs.Validate(); err != nil { + pm.logger.Warnw("failed to update gouging setting, new storage price makes the setting invalid", zap.Error(err)) + gs.MaxStoragePrice = bkp + } else { + pm.logger.Infow("updating max storage price", "old", bkp, "new", gs.MaxStoragePrice, "rate", rate) + updated = true + } + } + } + + // update max upload price + if pins.MaxUpload.IsPinned() { + update, err := convertCurrencyToSC(decimal.NewFromFloat(pins.MaxUpload.Value), rate) + if err != nil { + pm.logger.Warnw("failed to convert max upload price to currency", zap.Error(err)) + } else if !gs.MaxUploadPrice.Equals(update) { + bkp := gs.MaxUploadPrice + gs.MaxUploadPrice = update + if err := 
gs.Validate(); err != nil { + pm.logger.Warnw("failed to update gouging setting, new upload price makes the setting invalid", zap.Error(err)) + gs.MaxUploadPrice = bkp + } else { + pm.logger.Infow("updating max upload price", "old", bkp, "new", gs.MaxUploadPrice, "rate", rate) + updated = true + } + } + } + + // return early if no updates took place + if !updated { + pm.logger.Infow("gouging prices did not require price update", "rate", rate) + return nil + } + + // validate settings + err := gs.Validate() + if err != nil { + pm.logger.Warnw("failed to update gouging setting, new settings make the setting invalid", zap.Error(err)) + return err + } + + // update settings + bytes, _ := json.Marshal(gs) + err = pm.ss.UpdateSetting(ctx, api.SettingGouging, string(bytes)) + + // broadcast event + if err == nil { + pm.broadcaster.BroadcastAction(ctx, webhooks.Event{ + Module: api.ModuleSetting, + Event: api.EventUpdate, + Payload: api.EventSettingUpdate{ + Key: api.SettingGouging, + Update: string(bytes), + Timestamp: time.Now().UTC(), + }, + }) + } + + return err +} + +func (pm *pinManager) updatePrices(ctx context.Context, forced bool) error { + pm.logger.Debugw("updating prices", zap.Bool("forced", forced)) + + // fetch pinned settings + settings, err := pm.pinnedSettings(ctx) + if errors.Is(err, api.ErrSettingNotFound) { + pm.logger.Debug("price pinning not configured, skipping price update") + return nil + } else if err != nil { + return fmt.Errorf("failed to fetch pinned settings: %w", err) + } else if !settings.Enabled { + pm.logger.Debug("price pinning is disabled, skipping price update") + return nil + } + + // fetch exchange rate + rate, err := NewForexClient(settings.ForexEndpointURL).SiacoinExchangeRate(ctx, settings.Currency) + if err != nil { + return fmt.Errorf("failed to fetch exchange rate for '%s': %w", settings.Currency, err) + } else if rate <= 0 { + return fmt.Errorf("exchange rate for '%s' must be positive: %f", settings.Currency, rate) + } + + // 
update exchange rates + err = pm.updateExchangeRates(settings.Currency, rate) + if err != nil { + return err + } + + // return early if the rate does not exceed the threshold + if !forced && !pm.rateExceedsThreshold(settings.Threshold) { + pm.logger.Debug( + "rate does not exceed threshold, skipping price update", + zap.Stringer("threshold", decimal.NewFromFloat(settings.Threshold)), + zap.Stringer("rate", decimal.NewFromFloat(rate)), + ) + return nil + } + + // update gouging settings + update := pm.averageRate() + err = pm.updateGougingSettings(ctx, settings.GougingSettingsPins, update) + if err != nil { + pm.logger.Warnw("failed to update gouging settings", zap.Error(err)) + } + + // update autopilot settings + for ap, pins := range settings.Autopilots { + err = pm.updateAutopilotSettings(ctx, ap, pins, update) + if err != nil { + pm.logger.Warnw("failed to update autopilot settings", zap.String("autopilot", ap), zap.Error(err)) + } + } + + return nil +} + +// convertCurrencyToSC converts a value in an external currency and an exchange +// rate to Siacoins. 
+func convertCurrencyToSC(target decimal.Decimal, rate decimal.Decimal) (types.Currency, error) { + if rate.IsZero() { + return types.Currency{}, nil + } + + i := target.Div(rate).Mul(decimal.New(1, 24)).BigInt() + if i.Sign() < 0 { + return types.Currency{}, errors.New("negative currency") + } else if i.BitLen() > 128 { + return types.Currency{}, errors.New("currency overflow") + } + return types.NewCurrency(i.Uint64(), i.Rsh(i, 64).Uint64()), nil +} diff --git a/internal/bus/pinmanager_test.go b/internal/bus/pinmanager_test.go new file mode 100644 index 000000000..a2af6e137 --- /dev/null +++ b/internal/bus/pinmanager_test.go @@ -0,0 +1,291 @@ +package bus + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "reflect" + "sync" + "testing" + "time" + + "github.com/shopspring/decimal" + "go.sia.tech/core/types" + "go.sia.tech/hostd/host/settings/pin" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/build" + "go.sia.tech/renterd/webhooks" + "go.uber.org/zap" +) + +const ( + testAutopilotID = "default" + testUpdateInterval = 100 * time.Millisecond +) + +type mockBroadcaster struct { + events []webhooks.Event +} + +func (meb *mockBroadcaster) BroadcastAction(ctx context.Context, e webhooks.Event) error { + meb.events = append(meb.events, e) + return nil +} + +type mockForexAPI struct { + s *httptest.Server + + mu sync.Mutex + rate float64 +} + +func newTestForexAPI() *mockForexAPI { + api := &mockForexAPI{rate: 1} + api.s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + api.mu.Lock() + defer api.mu.Unlock() + json.NewEncoder(w).Encode(api.rate) + })) + return api +} + +func (api *mockForexAPI) Close() { + api.s.Close() +} + +func (api *mockForexAPI) updateRate(rate float64) { + api.mu.Lock() + defer api.mu.Unlock() + api.rate = rate +} + +type mockStore struct { + mu sync.Mutex + settings map[string]string + autopilots map[string]api.Autopilot +} + +func newTestStore() *mockStore { + s := 
&mockStore{ + autopilots: make(map[string]api.Autopilot), + settings: make(map[string]string), + } + + // add default price pin - and gouging settings + b, _ := json.Marshal(build.DefaultPricePinSettings) + s.settings[api.SettingPricePinning] = string(b) + b, _ = json.Marshal(build.DefaultGougingSettings) + s.settings[api.SettingGouging] = string(b) + + // add default autopilot + s.autopilots[testAutopilotID] = api.Autopilot{ + ID: testAutopilotID, + Config: api.AutopilotConfig{ + Contracts: api.ContractsConfig{ + Allowance: types.Siacoins(1), + }, + }, + } + + return s +} + +func (ms *mockStore) gougingSettings() api.GougingSettings { + val, err := ms.Setting(context.Background(), api.SettingGouging) + if err != nil { + panic(err) + } + var gs api.GougingSettings + if err := json.Unmarshal([]byte(val), &gs); err != nil { + panic(err) + } + return gs +} + +func (ms *mockStore) updatPinnedSettings(pps api.PricePinSettings) { + b, _ := json.Marshal(pps) + ms.UpdateSetting(context.Background(), api.SettingPricePinning, string(b)) + time.Sleep(2 * testUpdateInterval) +} + +func (ms *mockStore) Setting(ctx context.Context, key string) (string, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + return ms.settings[key], nil +} + +func (ms *mockStore) UpdateSetting(ctx context.Context, key, value string) error { + ms.mu.Lock() + defer ms.mu.Unlock() + ms.settings[key] = value + return nil +} + +func (ms *mockStore) Autopilot(ctx context.Context, id string) (api.Autopilot, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + return ms.autopilots[id], nil +} + +func (ms *mockStore) UpdateAutopilot(ctx context.Context, autopilot api.Autopilot) error { + ms.mu.Lock() + defer ms.mu.Unlock() + ms.autopilots[autopilot.ID] = autopilot + return nil +} + +func TestPinManager(t *testing.T) { + // mock dependencies + ms := newTestStore() + eb := &mockBroadcaster{} + + // mock forex api + forex := newTestForexAPI() + defer forex.Close() + + // start a pinmanager + pm := NewPinManager(eb, ms, 
ms, testUpdateInterval, time.Minute, zap.NewNop()) + if err := pm.Run(context.Background()); err != nil { + t.Fatal(err) + } + defer func() { + if err := pm.Close(context.Background()); err != nil { + t.Fatal(err) + } + }() + + // define a small helper to fetch the price manager's rates + rates := func() []float64 { + t.Helper() + pm.mu.Lock() + defer pm.mu.Unlock() + return pm.rates + } + + // assert price manager is disabled by default + if cnt := len(rates()); cnt != 0 { + t.Fatalf("expected no rates, got %d", cnt) + } + + // enable price pinning + pps := build.DefaultPricePinSettings + pps.Enabled = true + pps.Currency = "usd" + pps.Threshold = 0.5 + pps.ForexEndpointURL = forex.s.URL + ms.updatPinnedSettings(pps) + + // assert price manager is running now + if cnt := len(rates()); cnt < 1 { + t.Fatal("expected at least one rate") + } + + // update exchange rate and fetch current gouging settings + forex.updateRate(2.5) + gs := ms.gougingSettings() + + // configure all pins but disable them for now + pps.GougingSettingsPins.MaxDownload = api.Pin{Value: 3, Pinned: false} + pps.GougingSettingsPins.MaxRPCPrice = api.Pin{Value: 3, Pinned: false} + pps.GougingSettingsPins.MaxStorage = api.Pin{Value: 3, Pinned: false} + pps.GougingSettingsPins.MaxUpload = api.Pin{Value: 3, Pinned: false} + ms.updatPinnedSettings(pps) + + // assert gouging settings are unchanged + if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { + t.Fatalf("expected gouging settings to be the same, got %v", gss) + } + + // enable the max download pin, with the threshold at 0.5 it should remain unchanged + pps.GougingSettingsPins.MaxDownload.Pinned = true + ms.updatPinnedSettings(pps) + if gss := ms.gougingSettings(); !reflect.DeepEqual(gs, gss) { + t.Fatalf("expected gouging settings to be the same, got %v", gss) + } + + // lower the threshold, gouging settings should be updated + pps.Threshold = 0.05 + ms.updatPinnedSettings(pps) + if gss := ms.gougingSettings(); 
gss.MaxContractPrice.Equals(gs.MaxDownloadPrice) { + t.Fatalf("expected gouging settings to be updated, got %v = %v", gss.MaxDownloadPrice, gs.MaxDownloadPrice) + } + + // enable the rest of the pins + pps.GougingSettingsPins.MaxDownload.Pinned = true + pps.GougingSettingsPins.MaxRPCPrice.Pinned = true + pps.GougingSettingsPins.MaxStorage.Pinned = true + pps.GougingSettingsPins.MaxUpload.Pinned = true + ms.updatPinnedSettings(pps) + + // assert they're all updated + if gss := ms.gougingSettings(); gss.MaxDownloadPrice.Equals(gs.MaxDownloadPrice) || + gss.MaxRPCPrice.Equals(gs.MaxRPCPrice) || + gss.MaxStoragePrice.Equals(gs.MaxStoragePrice) || + gss.MaxUploadPrice.Equals(gs.MaxUploadPrice) { + t.Fatalf("expected gouging settings to be updated, got %v = %v", gss, gs) + } + + // increase rate so average isn't catching up to us + forex.updateRate(3) + + // fetch autopilot + ap, _ := ms.Autopilot(context.Background(), testAutopilotID) + + // add autopilot pin, but disable it + pins := api.AutopilotPins{ + Allowance: api.Pin{ + Pinned: false, + Value: 2, + }, + } + pps.Autopilots = map[string]api.AutopilotPins{testAutopilotID: pins} + ms.updatPinnedSettings(pps) + + // assert autopilot was not updated + if app, _ := ms.Autopilot(context.Background(), testAutopilotID); !app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { + t.Fatalf("expected autopilot to not be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) + } + + // enable the pin + pins.Allowance.Pinned = true + pps.Autopilots[testAutopilotID] = pins + ms.updatPinnedSettings(pps) + + // assert autopilot was updated + if app, _ := ms.Autopilot(context.Background(), testAutopilotID); app.Config.Contracts.Allowance.Equals(ap.Config.Contracts.Allowance) { + t.Fatalf("expected autopilot to be updated, got %v = %v", app.Config.Contracts.Allowance, ap.Config.Contracts.Allowance) + } +} + +// TestConvertConvertCurrencyToSC tests the conversion of a currency to Siacoins. 
+func TestConvertConvertCurrencyToSC(t *testing.T) { + tests := []struct { + target decimal.Decimal + rate decimal.Decimal + expected types.Currency + err error + }{ + {decimal.NewFromFloat(1), decimal.NewFromFloat(1), types.Siacoins(1), nil}, + {decimal.NewFromFloat(1), decimal.NewFromFloat(2), types.Siacoins(1).Div64(2), nil}, + {decimal.NewFromFloat(1), decimal.NewFromFloat(0.5), types.Siacoins(2), nil}, + {decimal.NewFromFloat(0.5), decimal.NewFromFloat(0.5), types.Siacoins(1), nil}, + {decimal.NewFromFloat(1), decimal.NewFromFloat(0.001), types.Siacoins(1000), nil}, + {decimal.NewFromFloat(1), decimal.NewFromFloat(0), types.Currency{}, nil}, + {decimal.NewFromFloat(1), decimal.NewFromFloat(-1), types.Currency{}, errors.New("negative currency")}, + {decimal.NewFromFloat(-1), decimal.NewFromFloat(1), types.Currency{}, errors.New("negative currency")}, + {decimal.New(1, 50), decimal.NewFromFloat(0.1), types.Currency{}, errors.New("currency overflow")}, + } + for i, test := range tests { + if result, err := pin.ConvertCurrencyToSC(test.target, test.rate); test.err != nil { + if err == nil { + t.Fatalf("%d: expected error, got nil", i) + } else if err.Error() != test.err.Error() { + t.Fatalf("%d: expected %v, got %v", i, test.err, err) + } + } else if !test.expected.Equals(result) { + t.Fatalf("%d: expected %d, got %d", i, test.expected, result) + } + } +} diff --git a/internal/node/chainmanager.go b/internal/node/chainmanager.go index d0f27bed3..6eaf91a53 100644 --- a/internal/node/chainmanager.go +++ b/internal/node/chainmanager.go @@ -9,6 +9,7 @@ import ( "go.sia.tech/core/consensus" "go.sia.tech/core/types" + "go.sia.tech/renterd/bus" "go.sia.tech/siad/modules" stypes "go.sia.tech/siad/types" ) @@ -24,26 +25,31 @@ var ( type chainManager struct { cs modules.ConsensusSet + tp bus.TransactionPool network *consensus.Network - close chan struct{} - mu sync.Mutex - tip consensus.State - synced bool + close chan struct{} + mu sync.Mutex + lastBlockTime time.Time + 
tip consensus.State + synced bool } // ProcessConsensusChange implements the modules.ConsensusSetSubscriber interface. func (m *chainManager) ProcessConsensusChange(cc modules.ConsensusChange) { m.mu.Lock() defer m.mu.Unlock() + + b := cc.AppliedBlocks[len(cc.AppliedBlocks)-1] m.tip = consensus.State{ Network: m.network, Index: types.ChainIndex{ - ID: types.BlockID(cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID()), + ID: types.BlockID(b.ID()), Height: uint64(cc.BlockHeight), }, } - m.synced = synced(cc.AppliedBlocks[len(cc.AppliedBlocks)-1].Timestamp) + m.synced = synced(b.Timestamp) + m.lastBlockTime = time.Unix(int64(b.Timestamp), 0) } // Network returns the network name. @@ -85,7 +91,9 @@ func (m *chainManager) BlockAtHeight(height uint64) (types.Block, bool) { } func (m *chainManager) LastBlockTime() time.Time { - return time.Unix(int64(m.cs.CurrentBlock().Timestamp), 0) + m.mu.Lock() + defer m.mu.Unlock() + return m.lastBlockTime } // IndexAtHeight return the chain index at the given height. @@ -125,12 +133,17 @@ func (m *chainManager) Subscribe(s modules.ConsensusSetSubscriber, ccID modules. return nil } +// PoolTransactions returns all transactions in the transaction pool +func (m *chainManager) PoolTransactions() []types.Transaction { + return m.tp.Transactions() +} + func synced(timestamp stypes.Timestamp) bool { return time.Since(time.Unix(int64(timestamp), 0)) <= maxSyncTime } // NewManager creates a new chain manager. 
-func NewChainManager(cs modules.ConsensusSet, network *consensus.Network) (*chainManager, error) { +func NewChainManager(cs modules.ConsensusSet, tp bus.TransactionPool, network *consensus.Network) (*chainManager, error) { height := cs.Height() block, ok := cs.BlockAtHeight(height) if !ok { @@ -139,6 +152,7 @@ func NewChainManager(cs modules.ConsensusSet, network *consensus.Network) (*chai m := &chainManager{ cs: cs, + tp: tp, network: network, tip: consensus.State{ Network: network, @@ -147,8 +161,9 @@ func NewChainManager(cs modules.ConsensusSet, network *consensus.Network) (*chai Height: uint64(height), }, }, - synced: synced(block.Timestamp), - close: make(chan struct{}), + synced: synced(block.Timestamp), + lastBlockTime: time.Unix(int64(block.Timestamp), 0), + close: make(chan struct{}), } if err := cs.ConsensusSetSubscribe(m, modules.ConsensusChangeRecent, m.close); err != nil { diff --git a/internal/node/node.go b/internal/node/node.go index 293363653..f305441cf 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -8,6 +8,7 @@ import ( "net/http" "os" "path/filepath" + "strings" "time" "go.sia.tech/core/consensus" @@ -17,6 +18,9 @@ import ( "go.sia.tech/renterd/bus" "go.sia.tech/renterd/config" "go.sia.tech/renterd/stores" + "go.sia.tech/renterd/stores/sql" + "go.sia.tech/renterd/stores/sql/mysql" + "go.sia.tech/renterd/stores/sql/sqlite" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker" @@ -30,6 +34,7 @@ import ( "golang.org/x/crypto/blake2b" "gorm.io/gorm" "gorm.io/gorm/logger" + "moul.io/zapgorm2" ) type Bus interface { @@ -39,12 +44,11 @@ type Bus interface { type BusConfig struct { config.Bus - Network *consensus.Network - Miner *Miner - DBLogger logger.Interface - DBDialector gorm.Dialector - DBMetricsDialector gorm.Dialector - SlabPruningInterval time.Duration + Database config.Database + DatabaseLog config.DatabaseLog + Network *consensus.Network + Logger *zap.Logger + Miner *Miner } type 
AutopilotConfig struct { @@ -53,28 +57,32 @@ type AutopilotConfig struct { } type ( - RunFn = func() error - ShutdownFn = func(context.Context) error + RunFn = func() error + BusSetupFn = func(context.Context) error + WorkerSetupFn = func(context.Context, string, string) error + ShutdownFn = func(context.Context) error ) -func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (http.Handler, ShutdownFn, error) { +var NoopFn = func(context.Context) error { return nil } + +func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (http.Handler, BusSetupFn, ShutdownFn, error) { gatewayDir := filepath.Join(dir, "gateway") if err := os.MkdirAll(gatewayDir, 0700); err != nil { - return nil, nil, err + return nil, nil, nil, err } g, err := gateway.New(cfg.GatewayAddr, cfg.Bootstrap, gatewayDir) if err != nil { - return nil, nil, err + return nil, nil, nil, err } consensusDir := filepath.Join(dir, "consensus") if err := os.MkdirAll(consensusDir, 0700); err != nil { - return nil, nil, err + return nil, nil, nil, err } cs, errCh := mconsensus.New(g, cfg.Bootstrap, consensusDir) select { case err := <-errCh: if err != nil { - return nil, nil, err + return nil, nil, nil, err } default: go func() { @@ -85,29 +93,65 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht } tpoolDir := filepath.Join(dir, "transactionpool") if err := os.MkdirAll(tpoolDir, 0700); err != nil { - return nil, nil, err + return nil, nil, nil, err } tp, err := transactionpool.New(cs, g, tpoolDir) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - // If no DB dialector was provided, use SQLite. 
- dbConn := cfg.DBDialector - if dbConn == nil { + // create database connections + var dbConn gorm.Dialector + var dbMetrics sql.MetricsDatabase + if cfg.Database.MySQL.URI != "" { + // create MySQL connections + dbConn = stores.NewMySQLConnection( + cfg.Database.MySQL.User, + cfg.Database.MySQL.Password, + cfg.Database.MySQL.URI, + cfg.Database.MySQL.Database, + ) + dbm, err := mysql.Open( + cfg.Database.MySQL.User, + cfg.Database.MySQL.Password, + cfg.Database.MySQL.URI, + cfg.Database.MySQL.MetricsDatabase, + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to open MySQL metrics database: %w", err) + } + dbMetrics, err = mysql.NewMetricsDatabase(dbm, l.Named("metrics").Sugar(), cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to create MySQL metrics database: %w", err) + } + } else { + // create database directory dbDir := filepath.Join(dir, "db") if err := os.MkdirAll(dbDir, 0700); err != nil { - return nil, nil, err + return nil, nil, nil, err } + + // create SQLite connections dbConn = stores.NewSQLiteConnection(filepath.Join(dbDir, "db.sqlite")) - } - dbMetricsConn := cfg.DBMetricsDialector - if dbMetricsConn == nil { - dbDir := filepath.Join(dir, "db") - if err := os.MkdirAll(dbDir, 0700); err != nil { - return nil, nil, err + + dbm, err := sqlite.Open(filepath.Join(dbDir, "metrics.sqlite")) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to open SQLite metrics database: %w", err) + } + dbMetrics, err = sqlite.NewMetricsDatabase(dbm, l.Named("metrics").Sugar(), cfg.DatabaseLog.SlowThreshold, cfg.DatabaseLog.SlowThreshold) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to create SQLite metrics database: %w", err) } - dbMetricsConn = stores.NewSQLiteConnection(filepath.Join(dbDir, "metrics.sqlite")) + } + + // create database logger + dbLogger := zapgorm2.Logger{ + ZapLogger: cfg.Logger.Named("SQL"), + LogLevel: gormLogLevel(cfg.DatabaseLog), + 
SlowThreshold: cfg.DatabaseLog.SlowThreshold, + SkipCallerLookup: false, + IgnoreRecordNotFoundError: cfg.DatabaseLog.IgnoreRecordNotFoundError, + Context: nil, } alertsMgr := alerts.NewManager() @@ -116,8 +160,8 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht announcementMaxAge := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour sqlStore, ccid, err := stores.NewSQLStore(stores.Config{ Conn: dbConn, - ConnMetrics: dbMetricsConn, Alerts: alerts.WithOrigin(alertsMgr, "bus"), + DBMetrics: dbMetrics, PartialSlabDir: sqlStoreDir, Migrate: true, AnnouncementMaxAge: announcementMaxAge, @@ -125,15 +169,17 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht WalletAddress: walletAddr, SlabBufferCompletionThreshold: cfg.SlabBufferCompletionThreshold, Logger: l.Sugar(), - GormLogger: cfg.DBLogger, + GormLogger: dbLogger, RetryTransactionIntervals: []time.Duration{200 * time.Millisecond, 500 * time.Millisecond, time.Second, 3 * time.Second, 10 * time.Second, 10 * time.Second}, + LongQueryDuration: cfg.DatabaseLog.SlowThreshold, + LongTxDuration: cfg.DatabaseLog.SlowThreshold, }) if err != nil { - return nil, nil, err + return nil, nil, nil, err } hooksMgr, err := webhooks.NewManager(l.Named("webhooks").Sugar(), sqlStore) if err != nil { - return nil, nil, err + return nil, nil, nil, err } // Hook up webhooks to alerts. 
@@ -163,24 +209,24 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht w := wallet.NewSingleAddressWallet(seed, sqlStore, cfg.UsedUTXOExpiry, zap.NewNop().Sugar()) tp.TransactionPoolSubscribe(w) if err := cs.ConsensusSetSubscribe(w, modules.ConsensusChangeRecent, nil); err != nil { - return nil, nil, err + return nil, nil, nil, err } if m := cfg.Miner; m != nil { if err := cs.ConsensusSetSubscribe(m, ccid, nil); err != nil { - return nil, nil, err + return nil, nil, nil, err } tp.TransactionPoolSubscribe(m) } - cm, err := NewChainManager(cs, cfg.Network) + cm, err := NewChainManager(cs, NewTransactionPool(tp), cfg.Network) if err != nil { - return nil, nil, err + return nil, nil, nil, err } b, err := bus.New(syncer{g, tp}, alertsMgr, hooksMgr, cm, NewTransactionPool(tp), w, sqlStore, sqlStore, sqlStore, sqlStore, sqlStore, sqlStore, l) if err != nil { - return nil, nil, err + return nil, nil, nil, err } shutdownFn := func(ctx context.Context) error { @@ -193,21 +239,21 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht sqlStore.Close(), ) } - return b.Handler(), shutdownFn, nil + return b.Handler(), b.Setup, shutdownFn, nil } -func NewWorker(cfg config.Worker, s3Opts s3.Opts, b Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, http.Handler, ShutdownFn, error) { +func NewWorker(cfg config.Worker, s3Opts s3.Opts, b Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, http.Handler, WorkerSetupFn, ShutdownFn, error) { workerKey := blake2b.Sum256(append([]byte("worker"), seed...)) w, err := worker.New(workerKey, cfg.ID, b, cfg.ContractLockTimeout, cfg.BusFlushInterval, cfg.DownloadOverdriveTimeout, cfg.UploadOverdriveTimeout, cfg.DownloadMaxOverdrive, cfg.UploadMaxOverdrive, cfg.DownloadMaxMemory, cfg.UploadMaxMemory, cfg.AllowPrivateIPs, l) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } s3Handler, err := s3.New(b, w, l.Named("s3").Sugar(), s3Opts) if err != nil 
{ err = errors.Join(err, w.Shutdown(context.Background())) - return nil, nil, nil, fmt.Errorf("failed to create s3 handler: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to create s3 handler: %w", err) } - return w.Handler(), s3Handler, w.Shutdown, nil + return w.Handler(), s3Handler, w.Setup, w.Shutdown, nil } func NewAutopilot(cfg AutopilotConfig, b autopilot.Bus, workers []autopilot.Worker, l *zap.Logger) (http.Handler, RunFn, ShutdownFn, error) { @@ -217,3 +263,24 @@ func NewAutopilot(cfg AutopilotConfig, b autopilot.Bus, workers []autopilot.Work } return ap.Handler(), ap.Run, ap.Shutdown, nil } + +func gormLogLevel(cfg config.DatabaseLog) logger.LogLevel { + level := logger.Silent + if cfg.Enabled { + switch strings.ToLower(cfg.Level) { + case "": + level = logger.Warn // default to 'warn' if not set + case "error": + level = logger.Error + case "warn": + level = logger.Warn + case "info": + level = logger.Info + case "debug": + level = logger.Info + default: + log.Fatalf("invalid log level %q, options are: silent, error, warn, info", cfg.Level) + } + } + return level +} diff --git a/internal/sql/log.go b/internal/sql/log.go new file mode 100644 index 000000000..b13cf83a1 --- /dev/null +++ b/internal/sql/log.go @@ -0,0 +1,148 @@ +package sql + +import ( + "context" + "database/sql" + "time" + + "go.uber.org/zap" +) + +// The following types are wrappers for the sql package types, adding logging +// capabilities. 
+type ( + LoggedStmt struct { + *sql.Stmt + query string + log *zap.Logger + longQueryDuration time.Duration + } + + loggedTxn struct { + *sql.Tx + log *zap.Logger + longQueryDuration time.Duration + } + + LoggedRow struct { + *sql.Row + log *zap.Logger + longQueryDuration time.Duration + } + + LoggedRows struct { + *sql.Rows + log *zap.Logger + longQueryDuration time.Duration + } +) + +func (lr *LoggedRows) Next() bool { + start := time.Now() + next := lr.Rows.Next() + if dur := time.Since(start); dur > lr.longQueryDuration { + lr.log.Warn("slow next", zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return next +} + +func (lr *LoggedRows) Scan(dest ...any) error { + start := time.Now() + err := lr.Rows.Scan(dest...) + if dur := time.Since(start); dur > lr.longQueryDuration { + lr.log.Warn("slow scan", zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return err +} + +func (lr *LoggedRow) Scan(dest ...any) error { + start := time.Now() + err := lr.Row.Scan(dest...) + if dur := time.Since(start); dur > lr.longQueryDuration { + lr.log.Warn("slow scan", zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return err +} + +func (ls *LoggedStmt) Exec(ctx context.Context, args ...any) (sql.Result, error) { + start := time.Now() + result, err := ls.Stmt.ExecContext(ctx, args...) + if dur := time.Since(start); dur > ls.longQueryDuration { + ls.log.Warn("slow exec", zap.String("query", ls.query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return result, err +} + +func (ls *LoggedStmt) Query(ctx context.Context, args ...any) (*LoggedRows, error) { + start := time.Now() + rows, err := ls.Stmt.QueryContext(ctx, args...) 
+ if dur := time.Since(start); dur > ls.longQueryDuration { + ls.log.Warn("slow query", zap.String("query", ls.query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedRows{rows, ls.log.Named("rows"), ls.longQueryDuration}, err +} + +func (ls *LoggedStmt) QueryRow(ctx context.Context, args ...any) *LoggedRow { + start := time.Now() + row := ls.Stmt.QueryRowContext(ctx, args...) + if dur := time.Since(start); dur > ls.longQueryDuration { + ls.log.Warn("slow query row", zap.String("query", ls.query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedRow{row, ls.log.Named("row"), ls.longQueryDuration} +} + +// Exec executes a query without returning any rows. The args are for +// any placeholder parameters in the query. +func (lt *loggedTxn) Exec(ctx context.Context, query string, args ...any) (sql.Result, error) { + start := time.Now() + result, err := lt.Tx.ExecContext(ctx, query, args...) + if dur := time.Since(start); dur > lt.longQueryDuration { + lt.log.Warn("slow exec", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return result, err +} + +// Prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the +// returned statement. The caller must call the statement's Close method +// when the statement is no longer needed. +func (lt *loggedTxn) Prepare(ctx context.Context, query string) (*LoggedStmt, error) { + start := time.Now() + stmt, err := lt.Tx.PrepareContext(ctx, query) + if err != nil { + return nil, err + } else if dur := time.Since(start); dur > lt.longQueryDuration { + lt.log.Warn("slow prepare", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedStmt{ + Stmt: stmt, + query: query, + log: lt.log.Named("statement"), + longQueryDuration: lt.longQueryDuration, + }, nil +} + +// Query executes a query that returns rows, typically a SELECT. 
The +// args are for any placeholder parameters in the query. +func (lt *loggedTxn) Query(ctx context.Context, query string, args ...any) (*LoggedRows, error) { + start := time.Now() + rows, err := lt.Tx.QueryContext(ctx, query, args...) + if dur := time.Since(start); dur > lt.longQueryDuration { + lt.log.Warn("slow query", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedRows{rows, lt.log.Named("rows"), lt.longQueryDuration}, err +} + +// QueryRow executes a query that is expected to return at most one row. +// QueryRow always returns a non-nil value. Errors are deferred until +// Row's Scan method is called. If the query selects no rows, the *Row's +// Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the +// first selected row and discards the rest. +func (lt *loggedTxn) QueryRow(ctx context.Context, query string, args ...any) *LoggedRow { + start := time.Now() + row := lt.Tx.QueryRowContext(ctx, query, args...) + if dur := time.Since(start); dur > lt.longQueryDuration { + lt.log.Warn("slow query row", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedRow{row, lt.log.Named("row"), lt.longQueryDuration} +} diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go new file mode 100644 index 000000000..d29e88fe3 --- /dev/null +++ b/internal/sql/migrations.go @@ -0,0 +1,284 @@ +package sql + +import ( + "context" + "embed" + "fmt" + "strings" + "unicode/utf8" + + "go.sia.tech/renterd/internal/utils" + "go.uber.org/zap" +) + +type ( + Migration struct { + ID string + Migrate func(tx Tx) error + } + + // Migrator is an interface for defining database-specific helper methods + // required during migrations + Migrator interface { + ApplyMigration(ctx context.Context, fn func(tx Tx) (bool, error)) error + CreateMigrationTable(ctx context.Context) error + DB() *DB + } + + MainMigrator interface { + Migrator + MakeDirsForPath(ctx context.Context, tx Tx, 
path string) (int64, error) + } +) + +var ( + MainMigrations = func(ctx context.Context, m MainMigrator, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { + dbIdentifier := "main" + return []Migration{ + { + ID: "00001_init", + Migrate: func(tx Tx) error { return ErrRunV072 }, + }, + { + ID: "00001_object_metadata", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00001_object_metadata", log) + }, + }, + { + ID: "00002_prune_slabs_trigger", + Migrate: func(tx Tx) error { + err := performMigration(ctx, tx, migrationsFs, dbIdentifier, "00002_prune_slabs_trigger", log) + if utils.IsErr(err, ErrMySQLNoSuperPrivilege) { + log.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") + } + return err + }, + }, + { + ID: "00003_idx_objects_size", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00003_idx_objects_size", log) + }, + }, + { + ID: "00004_prune_slabs_cascade", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00004_prune_slabs_cascade", log) + }, + }, + { + ID: "00005_zero_size_object_health", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00005_zero_size_object_health", log) + }, + }, + { + ID: "00006_idx_objects_created_at", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00006_idx_objects_created_at", log) + }, + }, + { + ID: "00007_host_checks", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00007_host_checks", log) + }, + }, + { + ID: "00008_directories", + Migrate: func(tx Tx) error { + if err := performMigration(ctx, tx, migrationsFs, dbIdentifier, "00008_directories_1", log); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } + // helper type + type obj struct { + ID uint + ObjectID string + } + // 
loop over all objects and deduplicate dirs to create + log.Info("beginning post-migration directory creation, this might take a while") + batchSize := 10000 + processedDirs := make(map[string]struct{}) + for offset := 0; ; offset += batchSize { + if offset > 0 && offset%batchSize == 0 { + log.Infof("processed %v objects", offset) + } + var objBatch []obj + rows, err := tx.Query(ctx, "SELECT id, object_id FROM objects ORDER BY id LIMIT ? OFFSET ?", batchSize, offset) + if err != nil { + return fmt.Errorf("failed to fetch objects: %v", err) + } + for rows.Next() { + var o obj + if err := rows.Scan(&o.ID, &o.ObjectID); err != nil { + _ = rows.Close() + return fmt.Errorf("failed to scan object: %v", err) + } + objBatch = append(objBatch, o) + } + if err := rows.Close(); err != nil { + return fmt.Errorf("failed to close rows: %v", err) + } + if len(objBatch) == 0 { + break // done + } + for _, obj := range objBatch { + // check if dir was processed + dir := "" // root + if i := strings.LastIndex(obj.ObjectID, "/"); i > -1 { + dir = obj.ObjectID[:i+1] + } + _, exists := processedDirs[dir] + if exists { + continue // already processed + } + processedDirs[dir] = struct{}{} + + // process + dirID, err := m.MakeDirsForPath(ctx, tx, obj.ObjectID) + if err != nil { + return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) + } + + if _, err := tx.Exec(ctx, ` + UPDATE objects + SET db_directory_id = ? + WHERE object_id LIKE ? AND + SUBSTR(object_id, 1, ?) = ? 
AND + INSTR(SUBSTR(object_id, ?), '/') = 0 + `, + dirID, + dir+"%", + utf8.RuneCountInString(dir), dir, + utf8.RuneCountInString(dir)+1); err != nil { + return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) + } + } + } + log.Info("post-migration directory creation complete") + if err := performMigration(ctx, tx, migrationsFs, dbIdentifier, "00008_directories_2", log); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } + return nil + }, + }, + { + ID: "00009_json_settings", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00009_json_settings", log) + }, + }, + { + ID: "00010_webhook_headers", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00010_webhook_headers", log) + }, + }, + { + ID: "00011_host_subnets", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00011_host_subnets", log) + }, + }, + } + } + MetricsMigrations = func(ctx context.Context, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { + dbIdentifier := "metrics" + return []Migration{ + { + ID: "00001_init", + Migrate: func(tx Tx) error { return ErrRunV072 }, + }, + { + ID: "00001_idx_contracts_fcid_timestamp", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00001_idx_contracts_fcid_timestamp", log) + }, + }, + } + } +) + +func PerformMigrations(ctx context.Context, m Migrator, fs embed.FS, identifier string, migrations []Migration) error { + // try to create migrations table + err := m.CreateMigrationTable(ctx) + if err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) + } + + // check if the migrations table is empty + var isEmpty bool + if err := m.DB().QueryRow(ctx, "SELECT COUNT(*) = 0 FROM migrations").Scan(&isEmpty); err != nil { + return fmt.Errorf("failed to count rows in migrations table: %w", err) + } else if isEmpty { + // table is empty, init 
schema + return initSchema(ctx, m.DB(), fs, identifier, migrations) + } + + // apply missing migrations + for _, migration := range migrations { + if err := m.ApplyMigration(ctx, func(tx Tx) (bool, error) { + // check if migration was already applied + var applied bool + if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", migration.ID).Scan(&applied); err != nil { + return false, fmt.Errorf("failed to check if migration '%s' was already applied: %w", migration.ID, err) + } else if applied { + return false, nil + } + // run migration + if err := migration.Migrate(tx); err != nil { + return false, fmt.Errorf("migration '%s' failed: %w", migration.ID, err) + } + // insert migration + if _, err := tx.Exec(ctx, "INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { + return false, fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) + } + return true, nil + }); err != nil { + return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) + } + } + return nil +} + +func execSQLFile(ctx context.Context, tx Tx, fs embed.FS, folder, filename string) error { + path := fmt.Sprintf("migrations/%s/%s.sql", folder, filename) + + // read file + file, err := fs.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read %s: %w", path, err) + } + + // execute it + if _, err := tx.Exec(ctx, string(file)); err != nil { + return fmt.Errorf("failed to execute %s: %w", path, err) + } + return nil +} + +func initSchema(ctx context.Context, db *DB, fs embed.FS, identifier string, migrations []Migration) error { + return db.Transaction(ctx, func(tx Tx) error { + // init schema + if err := execSQLFile(ctx, tx, fs, identifier, "schema"); err != nil { + return fmt.Errorf("failed to execute schema: %w", err) + } + // insert migration ids + for _, migration := range migrations { + if _, err := tx.Exec(ctx, "INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { + return fmt.Errorf("failed to insert 
migration '%s': %w", migration.ID, err) + } + } + return nil + }) +} + +func performMigration(ctx context.Context, tx Tx, fs embed.FS, kind, migration string, logger *zap.SugaredLogger) error { + logger.Infof("performing %s migration '%s'", kind, migration) + if err := execSQLFile(ctx, tx, fs, kind, fmt.Sprintf("migration_%s", migration)); err != nil { + return err + } + logger.Infof("migration '%s' complete", migration) + return nil +} diff --git a/internal/sql/sql.go b/internal/sql/sql.go new file mode 100644 index 000000000..23b499213 --- /dev/null +++ b/internal/sql/sql.go @@ -0,0 +1,228 @@ +package sql + +import ( + "context" + "database/sql" + "encoding/hex" + "errors" + "fmt" + "math" + "math/rand" + "strings" + "time" + + "go.uber.org/zap" + "lukechampine.com/frand" +) + +const ( + maxRetryAttempts = 30 // 30 attempts + factor = 1.8 // factor ^ retryAttempts = backoff time in milliseconds + maxBackoff = 15 * time.Second + + DirectoriesRootID = 1 +) + +var ( + ErrInvalidNumberOfShards = errors.New("slab has invalid number of shards") + ErrShardRootChanged = errors.New("shard root changed") + + ErrRunV072 = errors.New("can't upgrade to >=v1.0.0 from your current version - please upgrade to v0.7.2 first (https://github.com/SiaFoundation/renterd/releases/tag/v0.7.2)") + ErrMySQLNoSuperPrivilege = errors.New("You do not have the SUPER privilege and binary logging is enabled") +) + +type ( + // A DB is a wrapper around a *sql.DB that provides additional utility + DB struct { + dbLockedMsgs []string + db *sql.DB + log *zap.Logger + longQueryDuration time.Duration + longTxDuration time.Duration + } + + // A txn is an interface for executing queries within a transaction. + Tx interface { + // Exec executes a query without returning any rows. The args are for + // any placeholder parameters in the query. + Exec(ctx context.Context, query string, args ...any) (sql.Result, error) + // Prepare creates a prepared statement for later queries or executions. 
+ // Multiple queries or executions may be run concurrently from the + // returned statement. The caller must call the statement's Close method + // when the statement is no longer needed. + Prepare(ctx context.Context, query string) (*LoggedStmt, error) + // Query executes a query that returns rows, typically a SELECT. The + // args are for any placeholder parameters in the query. + Query(ctx context.Context, query string, args ...any) (*LoggedRows, error) + // QueryRow executes a query that is expected to return at most one row. + // QueryRow always returns a non-nil value. Errors are deferred until + // Row's Scan method is called. If the query selects no rows, the *Row's + // Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the + // first selected row and discards the rest. + QueryRow(ctx context.Context, query string, args ...any) *LoggedRow + } +) + +func NewDB(db *sql.DB, log *zap.Logger, dbLockedMsgs []string, longQueryDuration, longTxDuration time.Duration) (*DB, error) { + if longQueryDuration == 0 || longTxDuration == 0 { + return nil, fmt.Errorf("longQueryDuration and longTxDuration must be non-zero: %d %d", longQueryDuration, longTxDuration) + } + return &DB{ + dbLockedMsgs: dbLockedMsgs, + db: db, + log: log, + longQueryDuration: longQueryDuration, + longTxDuration: longTxDuration, + }, nil +} + +// exec executes a query without returning any rows. The args are for +// any placeholder parameters in the query. +func (s *DB) Exec(ctx context.Context, query string, args ...any) (sql.Result, error) { + start := time.Now() + result, err := s.db.ExecContext(ctx, query, args...) + if dur := time.Since(start); dur > s.longQueryDuration { + s.log.Debug("slow exec", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return result, err +} + +// prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the +// returned statement. 
The caller must call the statement's Close method +// when the statement is no longer needed. +func (s *DB) Prepare(ctx context.Context, query string) (*LoggedStmt, error) { + start := time.Now() + stmt, err := s.db.PrepareContext(ctx, query) + if err != nil { + return nil, err + } else if dur := time.Since(start); dur > s.longQueryDuration { + s.log.Debug("slow prepare", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedStmt{ + Stmt: stmt, + query: query, + log: s.log.Named("statement"), + longQueryDuration: s.longQueryDuration, + }, nil +} + +// query executes a query that returns rows, typically a SELECT. The +// args are for any placeholder parameters in the query. +func (s *DB) Query(ctx context.Context, query string, args ...any) (*LoggedRows, error) { + start := time.Now() + rows, err := s.db.QueryContext(ctx, query, args...) + if dur := time.Since(start); dur > s.longQueryDuration { + s.log.Debug("slow query", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedRows{rows, s.log.Named("rows"), s.longQueryDuration}, err +} + +// queryRow executes a query that is expected to return at most one row. +// QueryRow always returns a non-nil value. Errors are deferred until +// Row's Scan method is called. If the query selects no rows, the *Row's +// Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the +// first selected row and discards the rest. +func (s *DB) QueryRow(ctx context.Context, query string, args ...any) *LoggedRow { + start := time.Now() + row := s.db.QueryRowContext(ctx, query, args...) + if dur := time.Since(start); dur > s.longQueryDuration { + s.log.Debug("slow query row", zap.String("query", query), zap.Duration("elapsed", dur), zap.Stack("stack")) + } + return &LoggedRow{row, s.log.Named("row"), s.longQueryDuration} +} + +// transaction executes a function within a database transaction. 
If the +// function returns an error, the transaction is rolled back. Otherwise, the +// transaction is committed. If the transaction fails due to a busy error, it is +// retried up to 'maxRetryAttempts' times before returning. +func (s *DB) Transaction(ctx context.Context, fn func(Tx) error) error { + var err error + txnID := hex.EncodeToString(frand.Bytes(4)) + log := s.log.Named("transaction").With(zap.String("id", txnID)) + start := time.Now() + attempt := 1 +LOOP: + for ; attempt < maxRetryAttempts; attempt++ { + attemptStart := time.Now() + log := log.With(zap.Int("attempt", attempt)) + err = s.transaction(ctx, fn) + if errors.Is(err, context.Canceled) && context.Cause(ctx) != nil { + err = context.Cause(ctx) + break LOOP + } else if err == nil { + // no error, break out of the loop + return nil + } + + // return immediately if the error is not a busy error + var locked bool + for _, msg := range s.dbLockedMsgs { + if strings.Contains(err.Error(), msg) { + locked = true + break + } + } + if !locked { + break LOOP + } + // exponential backoff + sleep := time.Duration(math.Pow(factor, float64(attempt))) * time.Millisecond + if sleep > maxBackoff { + sleep = maxBackoff + } + log.Warn("database locked", zap.Duration("elapsed", time.Since(attemptStart)), zap.Duration("totalElapsed", time.Since(start)), zap.Stack("stack"), zap.Duration("retry", sleep)) + + select { + case <-ctx.Done(): + err = errors.Join(err, context.Cause(ctx)) + break LOOP + case <-jitterAfter(sleep): + } + } + return fmt.Errorf("transaction failed (attempt %d): %w", attempt, err) +} + +// Close closes the underlying database. +func (s *DB) Close() error { + return s.db.Close() +} + +// transaction is a helper function to execute a function within a transaction. +// If fn returns an error, the transaction is rolled back. Otherwise, the +// transaction is committed. 
+func (s *DB) transaction(ctx context.Context, fn func(tx Tx) error) error { + start := time.Now() + tx, err := s.db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + defer func() { + if err := tx.Rollback(); err != nil && !errors.Is(err, sql.ErrTxDone) { + s.log.Error("failed to roll back transaction", zap.Error(err)) + } + }() + defer func() { + // log the transaction if it took longer than txn duration + if time.Since(start) > s.longTxDuration { + s.log.Debug("long transaction", zap.Duration("elapsed", time.Since(start)), zap.Stack("stack"), zap.Bool("failed", err != nil)) + } + }() + + ltx := &loggedTxn{ + Tx: tx, + log: s.log, + longQueryDuration: s.longQueryDuration, + } + if err := fn(ltx); err != nil { + return err + } else if err = tx.Commit(); err != nil { + return fmt.Errorf("failed to commit transaction: %w", err) + } + return nil +} + +// jitterSleep sleeps for a random duration between t and t*1.5. +func jitterAfter(t time.Duration) <-chan time.Time { + return time.After(t + time.Duration(rand.Int63n(int64(t/2)))) +} diff --git a/internal/test/config.go b/internal/test/config.go index 68a5fff5b..abf6caaac 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -52,6 +52,10 @@ var ( MinMaxEphemeralAccountBalance: types.Siacoins(1), // 1SC } + PricePinSettings = api.PricePinSettings{ + Enabled: false, + } + RedundancySettings = api.RedundancySettings{ MinShards: 2, TotalShards: 3, diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 552668079..6fd9f5673 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -24,16 +24,17 @@ import ( "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" "go.sia.tech/renterd/internal/test" + "go.sia.tech/renterd/internal/utils" + iworker "go.sia.tech/renterd/internal/worker" "go.sia.tech/renterd/stores" "go.sia.tech/renterd/worker/s3" + "go.sia.tech/web/renterd" "go.uber.org/zap" 
"go.uber.org/zap/zapcore" "gorm.io/gorm" "lukechampine.com/frand" "go.sia.tech/renterd/worker" - gormlogger "gorm.io/gorm/logger" - "moul.io/zapgorm2" ) const ( @@ -202,7 +203,6 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { defer cancel() // Apply options. - dbName := opts.dbName dir := t.TempDir() if opts.dir != "" { dir = opts.dir @@ -241,35 +241,31 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { if opts.autopilotSettings != nil { apSettings = *opts.autopilotSettings } - - // default database logger - if busCfg.DBLogger == nil { - busCfg.DBLogger = zapgorm2.Logger{ - ZapLogger: logger.Named("SQL"), - LogLevel: gormlogger.Warn, - SlowThreshold: 100 * time.Millisecond, - SkipCallerLookup: false, - IgnoreRecordNotFoundError: true, - Context: nil, - } + if busCfg.Logger == nil { + busCfg.Logger = logger + } + if opts.dbName != "" { + busCfg.Database.MySQL.Database = opts.dbName } // Check if we are testing against an external database. If so, we create a // database with a random name first. 
- uri, user, password, _ := stores.DBConfigFromEnv() - if uri != "" { - tmpDB, err := gorm.Open(stores.NewMySQLConnection(user, password, uri, "")) - tt.OK(err) - - if dbName == "" { - dbName = "db" + hex.EncodeToString(frand.Bytes(16)) + if mysql := config.MySQLConfigFromEnv(); mysql.URI != "" { + // generate a random database name if none are set + if busCfg.Database.MySQL.Database == "" { + busCfg.Database.MySQL.Database = "db" + hex.EncodeToString(frand.Bytes(16)) + } + if busCfg.Database.MySQL.MetricsDatabase == "" { + busCfg.Database.MySQL.MetricsDatabase = "db" + hex.EncodeToString(frand.Bytes(16)) } - dbMetricsName := "db" + hex.EncodeToString(frand.Bytes(16)) - tt.OK(tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", dbName)).Error) - tt.OK(tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", dbMetricsName)).Error) - busCfg.DBDialector = stores.NewMySQLConnection(user, password, uri, dbName) - busCfg.DBMetricsDialector = stores.NewMySQLConnection(user, password, uri, dbMetricsName) + tmpDB, err := gorm.Open(stores.NewMySQLConnection(mysql.User, mysql.Password, mysql.URI, "")) + tt.OK(err) + tt.OK(tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", busCfg.Database.MySQL.Database)).Error) + tt.OK(tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", busCfg.Database.MySQL.MetricsDatabase)).Error) + tmpDBB, err := tmpDB.DB() + tt.OK(err) + tt.OK(tmpDBB.Close()) } // Prepare individual dirs. 
@@ -292,7 +288,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { autopilotListener, err := net.Listen("tcp", "127.0.0.1:0") tt.OK(err) - busAddr := "http://" + busListener.Addr().String() + busAddr := fmt.Sprintf("http://%s/bus", busListener.Addr().String()) workerAddr := "http://" + workerListener.Addr().String() s3Addr := s3Listener.Addr().String() // not fully qualified path autopilotAddr := "http://" + autopilotListener.Addr().String() @@ -317,25 +313,30 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { busCfg.Miner = node.NewMiner(busClient) // Create bus. - b, bStopFn, err := node.NewBus(busCfg, busDir, wk, logger) + b, bSetupFn, bShutdownFn, err := node.NewBus(busCfg, busDir, wk, logger) tt.OK(err) busAuth := jape.BasicAuth(busPassword) - busServer := http.Server{ - Handler: busAuth(b), + busServer := &http.Server{ + Handler: utils.TreeMux{ + Handler: renterd.Handler(), // ui + Sub: map[string]utils.TreeMux{ + "/bus": { + Handler: busAuth(b), + }, + }, + }, } var busShutdownFns []func(context.Context) error busShutdownFns = append(busShutdownFns, busServer.Shutdown) - busShutdownFns = append(busShutdownFns, bStopFn) + busShutdownFns = append(busShutdownFns, bShutdownFn) // Create worker. 
- w, s3Handler, wShutdownFn, err := node.NewWorker(workerCfg, s3.Opts{}, busClient, wk, logger) + w, s3Handler, wSetupFn, wShutdownFn, err := node.NewWorker(workerCfg, s3.Opts{}, busClient, wk, logger) tt.OK(err) - - workerAuth := jape.BasicAuth(workerPassword) workerServer := http.Server{ - Handler: workerAuth(w), + Handler: iworker.Auth(workerPassword, false)(w), } var workerShutdownFns []func(context.Context) error @@ -366,7 +367,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { cluster := &TestCluster{ apID: apCfg.ID, dir: dir, - dbName: dbName, + dbName: busCfg.Database.MySQL.Database, logger: logger, network: busCfg.Network, miner: busCfg.Miner, @@ -414,6 +415,16 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { }() } + // Finish bus setup. + if err := bSetupFn(ctx); err != nil { + tt.Fatalf("failed to setup bus, err: %v", err) + } + + // Finish worker setup. + if err := wSetupFn(ctx, workerAddr, workerPassword); err != nil { + tt.Fatalf("failed to setup worker, err: %v", err) + } + // Set the test contract set to make sure we can add objects at the // beginning of a test right away. tt.OK(busClient.SetContractSet(ctx, test.ContractSet, []types.FileContractID{})) @@ -428,8 +439,9 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { // Update the bus settings. 
tt.OK(busClient.UpdateSetting(ctx, api.SettingGouging, test.GougingSettings)) - tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, test.RedundancySettings)) tt.OK(busClient.UpdateSetting(ctx, api.SettingContractSet, test.ContractSetSettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingPricePinning, test.PricePinSettings)) + tt.OK(busClient.UpdateSetting(ctx, api.SettingRedundancy, test.RedundancySettings)) tt.OK(busClient.UpdateSetting(ctx, api.SettingS3Authentication, api.S3AuthenticationSettings{ V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, })) @@ -466,7 +478,14 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { cluster.AddHostsBlocking(nHosts) cluster.WaitForContracts() cluster.WaitForContractSet(test.ContractSet, nHosts) - _ = cluster.WaitForAccounts() + cluster.WaitForAccounts() + } + + // Ping the UI + resp, err := http.DefaultClient.Get(fmt.Sprintf("http://%v", busListener.Addr())) + tt.OK(err) + if resp.StatusCode != http.StatusOK { + tt.Fatalf("unexpected status code: %v", resp.StatusCode) } return cluster @@ -565,7 +584,9 @@ func (c *TestCluster) MineBlocks(n int) { if len(c.hosts) == 0 { c.tt.OK(c.miner.Mine(wallet.Address, n)) c.Sync() + return } + // Otherwise mine blocks in batches of 3 to avoid going out of sync with // hosts by too many blocks. 
for mined := 0; mined < n; { @@ -891,8 +912,15 @@ func testBusCfg() node.BusConfig { UsedUTXOExpiry: time.Minute, SlabBufferCompletionThreshold: 0, }, - Network: testNetwork(), - SlabPruningInterval: time.Second, + Database: config.Database{ + MySQL: config.MySQLConfigFromEnv(), + }, + DatabaseLog: config.DatabaseLog{ + Enabled: true, + IgnoreRecordNotFoundError: true, + SlowThreshold: 100 * time.Millisecond, + }, + Network: testNetwork(), } } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 72ac4b95b..049e54d7e 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -24,12 +24,150 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/test" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.uber.org/zap" "lukechampine.com/frand" ) +func TestListObjects(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + // assertMetadata asserts ModTime, ETag and MimeType are set and then clears + // them afterwards so we can compare without having to specify the metadata + start := time.Now() + assertMetadata := func(entries []api.ObjectMetadata) { + for i := range entries { + // assert mod time + if !strings.HasSuffix(entries[i].Name, "/") && !entries[i].ModTime.Std().After(start.UTC()) { + t.Fatal("mod time should be set") + } + entries[i].ModTime = api.TimeRFC3339{} + + // assert mime type + isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file + if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { + t.Fatal("unexpected mime type", entries[i].MimeType) + } + entries[i].MimeType = "" + + // assert etag + if isDir != (entries[i].ETag == "") { + t.Fatal("etag should be set for files and empty for dirs") + } + entries[i].ETag = "" + } + } + + // create a test cluster + cluster := newTestCluster(t, testClusterOptions{ + hosts: 
test.RedundancySettings.TotalShards, + }) + defer cluster.Shutdown() + + b := cluster.Bus + w := cluster.Worker + tt := cluster.tt + + // upload the following paths + uploads := []struct { + path string + size int + }{ + {"/foo/bar", 1}, + {"/foo/bat", 2}, + {"/foo/baz/quux", 3}, + {"/foo/baz/quuz", 4}, + {"/gab/guub", 5}, + {"/FOO/bar", 6}, // test case sensitivity + } + + for _, upload := range uploads { + if upload.size == 0 { + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(nil), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + } else { + data := make([]byte, upload.size) + frand.Read(data) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, upload.path, api.UploadObjectOptions{})) + } + } + + tests := []struct { + prefix string + sortBy string + sortDir string + want []api.ObjectMetadata + }{ + {"/", "", "", []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", "", api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/FOO/bar", Size: 6, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: 
"/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/", api.ObjectSortByHealth, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/FOO/bar", Size: 6, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/foo/b", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"o/baz/quu", "", "", []api.ObjectMetadata{}}, + {"/foo", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.ObjectSortDirAsc, []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/baz/quuz", Size: 4, Health: 1}}}, + {"/foo", api.ObjectSortBySize, api.ObjectSortDirDesc, []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, + } + for _, test := range tests { + // use the bus client + res, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Prefix: test.prefix, + SortBy: test.sortBy, + SortDir: test.sortDir, + Limit: -1, + }) + if err != nil { + t.Fatal(err, test.prefix) + } + assertMetadata(res.Objects) + + got := res.Objects + if !(len(got) == 0 && len(test.want) == 0) && !reflect.DeepEqual(got, test.want) { + t.Log(cmp.Diff(got, test.want, cmp.Comparer(api.CompareTimeRFC3339))) + t.Fatalf("\nkey: %v\ngot: %v\nwant: %v\nsortBy: %v\nsortDir: %v", 
test.prefix, got, test.want, test.sortBy, test.sortDir) + } + if len(res.Objects) > 0 { + marker := "" + for offset := 0; offset < len(test.want); offset++ { + res, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Prefix: test.prefix, + SortBy: test.sortBy, + SortDir: test.sortDir, + Marker: marker, + Limit: 1, + }) + if err != nil { + t.Fatal(err) + } + + // assert mod time & clear it afterwards so we can compare + assertMetadata(res.Objects) + + got := res.Objects + if len(got) != 1 { + t.Fatalf("expected 1 object, got %v", len(got)) + } else if got[0].Name != test.want[offset].Name { + t.Fatalf("expected %v, got %v, offset %v, marker %v, sortBy %v, sortDir %v", test.want[offset].Name, got[0].Name, offset, marker, test.sortBy, test.sortDir) + } + marker = res.NextMarker + } + } + } + + // list invalid marker + _, err := b.ListObjects(context.Background(), api.DefaultBucketName, api.ListObjectOptions{ + Marker: "invalid", + SortBy: api.ObjectSortByHealth, + }) + if !utils.IsErr(err, api.ErrMarkerNotFound) { + t.Fatal(err) + } +} + // TestNewTestCluster is a test for creating a cluster of Nodes for testing, // making sure that it forms contracts, renews contracts and shuts down. 
func TestNewTestCluster(t *testing.T) { @@ -251,14 +389,15 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - if entries[i].MimeType == "" { - t.Fatal("mime type should be set", entries[i].MimeType, entries[i].Name) + isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file + if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { + t.Fatal("unexpected mime type", entries[i].MimeType) } entries[i].MimeType = "" // assert etag - if entries[i].ETag == "" { - t.Fatal("ETag should be set") + if isDir != (entries[i].ETag == "") { + t.Fatal("etag should be set for files and empty for dirs") } entries[i].ETag = "" } @@ -670,14 +809,16 @@ func TestUploadDownloadExtended(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(file2), api.DefaultBucketName, "fileÅ›/file2", api.UploadObjectOptions{})) // fetch all entries from the worker - entries, err := cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "", api.GetObjectOptions{}) + entries, err := cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "fileÅ›/", api.GetObjectOptions{}) tt.OK(err) - if len(entries) != 1 { - t.Fatal("expected one entry to be returned", len(entries)) + if len(entries) != 2 { + t.Fatal("expected two entries to be returned", len(entries)) } - if entries[0].MimeType != "application/octet-stream" { - t.Fatal("wrong mime type", entries[0].MimeType) + for _, entry := range entries { + if entry.MimeType != "application/octet-stream" { + t.Fatal("wrong mime type", entry.MimeType) + } } // fetch entries with "file" prefix @@ -1079,6 +1220,8 @@ func TestEphemeralAccounts(t *testing.T) { t.Fatal("account balance wasn't loaded") } else if acc.Drift.Cmp(big.NewInt(0)) == 0 { t.Fatal("account drift wasn't loaded") + } else if !acc.CleanShutdown { + t.Fatal("account should indicate a clean shutdown") } } @@ -1326,6 
+1469,13 @@ func TestUploadDownloadSameHost(t *testing.T) { tt.OK(b.DeleteObject(context.Background(), api.DefaultBucketName, fmt.Sprintf("foo_%d", i), api.DeleteObjectOptions{})) } + // wait until the slabs and sectors were pruned before constructing the + // frankenstein object since constructing the object would otherwise violate + // the UNIQUE constraint for the slab_id and slab_index. That's because we + // don't want to allow inserting 2 sectors referencing the same slab with + // the same index within the slab which happens on an upsert + time.Sleep(time.Second) + // build a frankenstein object constructed with all sectors on the same host res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost] tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", test.ContractSet, *res.Object.Object, api.AddObjectOptions{})) diff --git a/internal/test/e2e/events_test.go b/internal/test/e2e/events_test.go new file mode 100644 index 000000000..befa3194a --- /dev/null +++ b/internal/test/e2e/events_test.go @@ -0,0 +1,171 @@ +package e2e + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" + "go.sia.tech/renterd/webhooks" +) + +// TestEvents is a test that verifies the bus sends webhooks for certain events, +// providing an event webhook was registered. 
+func TestEvents(t *testing.T) { + // list all webhooks + allEvents := []func(string, map[string]string) webhooks.Webhook{ + api.WebhookConsensusUpdate, + api.WebhookContractArchive, + api.WebhookContractRenew, + api.WebhookContractSetUpdate, + api.WebhookSettingDelete, + api.WebhookSettingUpdate, + } + + // define helper to check if the event is known + isKnownEvent := func(e webhooks.Event) bool { + for _, eFn := range allEvents { + known := eFn("", nil) + if known.Module == e.Module && known.Event == e.Event { + return true + } + } + return false + } + + // define a small helper to keep track of received events + var mu sync.Mutex + received := make(map[string]webhooks.Event) + receiveEvent := func(event webhooks.Event) error { + // ignore pings + if event.Event == webhooks.WebhookEventPing { + return nil + } + + // check if the event is expected + if !isKnownEvent(event) { + return fmt.Errorf("unexpected event %+v", event) + } + + // keep track of the event + mu.Lock() + defer mu.Unlock() + key := event.Module + "_" + event.Event + if _, ok := received[key]; !ok { + received[key] = event + } + return nil + } + + // setup test server to receive webhooks + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var webhook webhooks.Event + if err := json.NewDecoder(r.Body).Decode(&webhook); err != nil { + t.Fatal(err) + } else if err := receiveEvent(webhook); err != nil { + t.Fatal(err) + } + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + // setup test cluster + cluster := newTestCluster(t, testClusterOptions{hosts: 1}) + defer cluster.Shutdown() + b := cluster.Bus + tt := cluster.tt + + // register webhooks + for _, e := range allEvents { + tt.OK(b.RegisterWebhook(context.Background(), e(server.URL, nil))) + } + + // fetch our contract + contracts, err := b.Contracts(context.Background(), api.ContractsOpts{}) + tt.OK(err) + if len(contracts) != 1 { + tt.Fatalf("expected 1 contract, got %v", len(contracts)) + 
} + c := contracts[0] + + // mine blocks to update consensus & to renew + cluster.MineToRenewWindow() + + // wait until our contract got renewed + var renewed api.ContractMetadata + tt.Retry(10, time.Second, func() (err error) { + renewed, err = b.RenewedContract(context.Background(), c.ID) + return err + }) + + // archive the renewal + tt.OK(b.ArchiveContracts(context.Background(), map[types.FileContractID]string{renewed.ID: t.Name()})) + + // fetch current gouging params + gp, err := b.GougingParams(context.Background()) + tt.OK(err) + + // update settings + gs := gp.GougingSettings + gs.HostBlockHeightLeeway = 100 + tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, gs)) + + // delete setting + tt.OK(b.DeleteSetting(context.Background(), api.SettingRedundancy)) + + // wait until we received the events + tt.Retry(10, time.Second, func() error { + mu.Lock() + defer mu.Unlock() + if len(received) < len(allEvents) { + cluster.MineBlocks(1) + return fmt.Errorf("expected %d unique events, got %+v (%d)", len(allEvents), received, len(received)) + } + return nil + }) + + // assert the events we received contain the expected information + for _, r := range received { + event, err := api.ParseEventWebhook(r) + tt.OK(err) + switch e := event.(type) { + case api.EventContractRenew: + if e.Renewal.ID != renewed.ID || e.Renewal.RenewedFrom != c.ID || e.Timestamp.IsZero() { + t.Fatalf("unexpected event %+v", e) + } + case api.EventContractArchive: + if e.ContractID != renewed.ID || e.Reason != t.Name() || e.Timestamp.IsZero() { + t.Fatalf("unexpected event %+v", e) + } + case api.EventContractSetUpdate: + if e.Name != test.ContractSet || len(e.ContractIDs) != 1 || e.ContractIDs[0] != c.ID || e.Timestamp.IsZero() { + t.Fatalf("unexpected event %+v", e) + } + case api.EventConsensusUpdate: + if e.TransactionFee.IsZero() || e.BlockHeight == 0 || e.Timestamp.IsZero() || !e.Synced { + t.Fatalf("unexpected event %+v", e) + } + case api.EventSettingUpdate: + if e.Key != 
api.SettingGouging || e.Timestamp.IsZero() { + t.Fatalf("unexpected event %+v", e) + } + var update api.GougingSettings + bytes, _ := json.Marshal(e.Update) + tt.OK(json.Unmarshal(bytes, &update)) + if update.HostBlockHeightLeeway != 100 { + t.Fatalf("unexpected update %+v", update) + } + case api.EventSettingDelete: + if e.Key != api.SettingRedundancy || e.Timestamp.IsZero() { + t.Fatalf("unexpected event %+v", e) + } + } + } +} diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 22e67b4ea..8915a2e11 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -3,6 +3,7 @@ package e2e import ( "bytes" "context" + "errors" "fmt" "io" "testing" @@ -12,7 +13,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/test" - "go.uber.org/zap/zapcore" "lukechampine.com/frand" ) @@ -22,9 +22,7 @@ func TestGouging(t *testing.T) { } // create a new test cluster - cluster := newTestCluster(t, testClusterOptions{ - logger: newTestLoggerCustom(zapcore.ErrorLevel), - }) + cluster := newTestCluster(t, clusterOptsDefault) defer cluster.Shutdown() cfg := test.AutopilotConfig.Contracts @@ -33,12 +31,20 @@ func TestGouging(t *testing.T) { tt := cluster.tt // mine enough blocks for the current period to become > period - cluster.MineBlocks(int(cfg.Period) * 2) + cluster.MineBlocks(int(cfg.Period) + 1) // add hosts tt.OKAll(cluster.AddHostsBlocking(int(test.AutopilotConfig.Contracts.Amount))) cluster.WaitForAccounts() + // assert that the current period is greater than the period + tt.Retry(10, time.Second, func() error { + if ap, _ := b.Autopilot(context.Background(), api.DefaultAutopilotID); ap.CurrentPeriod <= cfg.Period { + return errors.New("current period is not greater than period") + } + return nil + }) + // build a hosts map hostsMap := make(map[string]*Host) for _, h := range cluster.hosts { diff --git a/internal/test/e2e/host.go b/internal/test/e2e/host.go index 
36589b581..284ab65ae 100644 --- a/internal/test/e2e/host.go +++ b/internal/test/e2e/host.go @@ -172,16 +172,15 @@ func NewHost(privKey types.PrivateKey, dir string, network *consensus.Network, d if err := <-errCh; err != nil { return nil, fmt.Errorf("failed to create consensus set: %w", err) } - cm, err := node.NewChainManager(cs, network) - if err != nil { - return nil, err - } - tpool, err := transactionpool.New(cs, g, filepath.Join(dir, "transactionpool")) if err != nil { return nil, fmt.Errorf("failed to create transaction pool: %w", err) } tp := node.NewTransactionPool(tpool) + cm, err := node.NewChainManager(cs, tp, network) + if err != nil { + return nil, err + } log := zap.NewNop() db, err := sqlite.OpenDatabase(filepath.Join(dir, "hostd.db"), log.Named("sqlite")) @@ -189,7 +188,7 @@ func NewHost(privKey types.PrivateKey, dir string, network *consensus.Network, d return nil, fmt.Errorf("failed to create sql store: %w", err) } - wallet, err := wallet.NewSingleAddressWallet(privKey, cm, tp, db, log.Named("wallet")) + wallet, err := wallet.NewSingleAddressWallet(privKey, cm, db, log.Named("wallet")) if err != nil { return nil, fmt.Errorf("failed to create wallet: %w", err) } diff --git a/internal/test/host.go b/internal/test/host.go index c412e6813..e95d1d3d1 100644 --- a/internal/test/host.go +++ b/internal/test/host.go @@ -39,6 +39,7 @@ func NewHost(hk types.PublicKey, pt rhpv3.HostPriceTable, settings rhpv2.HostSet PriceTable: api.HostPriceTable{HostPriceTable: pt, Expiry: time.Now().Add(time.Minute)}, Settings: settings, Scanned: true, + Subnets: []string{"38.135.51.0/24"}, } } diff --git a/internal/utils/errors.go b/internal/utils/errors.go index 6c248b61d..67696b984 100644 --- a/internal/utils/errors.go +++ b/internal/utils/errors.go @@ -12,6 +12,7 @@ var ( ErrConnectionRefused = errors.New("connection refused") ErrConnectionTimedOut = errors.New("connection timed out") ErrConnectionResetByPeer = errors.New("connection reset by peer") + ErrIOTimeout = 
errors.New("i/o timeout") ) // IsErr can be used to compare an error to a target and also works when used on diff --git a/internal/utils/net.go b/internal/utils/net.go new file mode 100644 index 000000000..a4aabd252 --- /dev/null +++ b/internal/utils/net.go @@ -0,0 +1,91 @@ +package utils + +import ( + "context" + "fmt" + "net" + "sort" + + "go.sia.tech/renterd/api" +) + +const ( + ipv4FilterRange = 24 + ipv6FilterRange = 32 +) + +var ( + privateSubnets []*net.IPNet +) + +func init() { + for _, subnet := range []string{ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "100.64.0.0/10", + } { + _, subnet, err := net.ParseCIDR(subnet) + if err != nil { + panic(fmt.Sprintf("failed to parse subnet: %v", err)) + } + privateSubnets = append(privateSubnets, subnet) + } +} + +func ResolveHostIP(ctx context.Context, hostIP string) (subnets []string, private bool, _ error) { + // resolve host address + host, _, err := net.SplitHostPort(hostIP) + if err != nil { + return nil, false, err + } + addrs, err := (&net.Resolver{}).LookupIPAddr(ctx, host) + if err != nil { + return nil, false, err + } + + // filter out hosts associated with more than two addresses or two of the same type + if len(addrs) > 2 || (len(addrs) == 2) && (len(addrs[0].IP) == len(addrs[1].IP)) { + return nil, false, api.ErrHostTooManyAddresses + } + + // parse out subnets + for _, address := range addrs { + private = private || isPrivateIP(address.IP) + + // figure out the IP range + ipRange := ipv6FilterRange + if address.IP.To4() != nil { + ipRange = ipv4FilterRange + } + + // parse the subnet + cidr := fmt.Sprintf("%s/%d", address.String(), ipRange) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + continue + } + + // add it + subnets = append(subnets, ipnet.String()) + } + + // sort the subnets + sort.Slice(subnets, func(i, j int) bool { + return subnets[i] < subnets[j] + }) + return +} + +func isPrivateIP(addr net.IP) bool { + if addr.IsLoopback() || addr.IsLinkLocalUnicast() || 
addr.IsLinkLocalMulticast() { + return true + } + + for _, block := range privateSubnets { + if block.Contains(addr) { + return true + } + } + return false +} diff --git a/cmd/renterd/web.go b/internal/utils/web.go similarity index 60% rename from cmd/renterd/web.go rename to internal/utils/web.go index d42e8c500..0a490acd6 100644 --- a/cmd/renterd/web.go +++ b/internal/utils/web.go @@ -1,4 +1,4 @@ -package main +package utils import ( "net/http" @@ -6,26 +6,26 @@ import ( "strings" ) -type treeMux struct { - h http.Handler - sub map[string]treeMux +type TreeMux struct { + Handler http.Handler + Sub map[string]TreeMux } -func (t treeMux) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (t TreeMux) ServeHTTP(w http.ResponseWriter, req *http.Request) { if strings.HasPrefix(req.URL.Path, "/debug/pprof") { http.DefaultServeMux.ServeHTTP(w, req) return } - for prefix, c := range t.sub { + for prefix, c := range t.Sub { if strings.HasPrefix(req.URL.Path, prefix) { req.URL.Path = strings.TrimPrefix(req.URL.Path, prefix) c.ServeHTTP(w, req) return } } - if t.h != nil { - t.h.ServeHTTP(w, req) + if t.Handler != nil { + t.Handler.ServeHTTP(w, req) return } http.NotFound(w, req) diff --git a/internal/worker/auth.go b/internal/worker/auth.go new file mode 100644 index 000000000..032d2536c --- /dev/null +++ b/internal/worker/auth.go @@ -0,0 +1,20 @@ +package worker + +import ( + "net/http" + "strings" + + "go.sia.tech/jape" +) + +func Auth(password string, unauthenticatedDownloads bool) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if unauthenticatedDownloads && req.Method == http.MethodGet && strings.HasPrefix(req.URL.Path, "/objects/") { + h.ServeHTTP(w, req) + } else { + jape.BasicAuth(password)(h).ServeHTTP(w, req) + } + }) + } +} diff --git a/internal/worker/cache.go b/internal/worker/cache.go new file mode 100644 index 000000000..e223c82fe --- /dev/null 
+++ b/internal/worker/cache.go @@ -0,0 +1,344 @@ +package worker + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "sort" + "sync" + "time" + + "go.uber.org/zap" + + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/webhooks" +) + +const ( + cacheKeyDownloadContracts = "downloadcontracts" + cacheKeyGougingParams = "gougingparams" + + cacheEntryExpiry = 5 * time.Minute +) + +var ( + errCacheNotReady = errors.New("cache is not ready yet, required webhooks have not been registered") + errCacheOutdated = errors.New("cache is outdated, the value fetched from the bus does not match the cached value") +) + +type memoryCache struct { + items map[string]*cacheEntry + mu sync.RWMutex +} + +type cacheEntry struct { + value interface{} + expiry time.Time +} + +func newMemoryCache() *memoryCache { + return &memoryCache{ + items: make(map[string]*cacheEntry), + } +} + +func (c *memoryCache) Get(key string) (value interface{}, found bool, expired bool) { + c.mu.RLock() + defer c.mu.RUnlock() + entry, ok := c.items[key] + if !ok { + return nil, false, false + } else if time.Now().After(entry.expiry) { + return entry.value, true, true + } + return entry.value, true, false +} + +func (c *memoryCache) Set(key string, value interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.items[key] = &cacheEntry{ + value: value, + expiry: time.Now().Add(cacheEntryExpiry), + } +} + +func (c *memoryCache) Invalidate(key string) { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.items, key) +} + +type ( + Bus interface { + Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) + GougingParams(ctx context.Context) (api.GougingParams, error) + RegisterWebhook(ctx context.Context, wh webhooks.Webhook) error + } + + WorkerCache interface { + DownloadContracts(ctx context.Context) ([]api.ContractMetadata, error) + GougingParams(ctx context.Context) (api.GougingParams, error) + HandleEvent(event webhooks.Event) error + Initialize(ctx context.Context, 
workerAPI string, opts ...webhooks.HeaderOption) error + } +) + +type cache struct { + b Bus + + cache *memoryCache + logger *zap.SugaredLogger + + mu sync.Mutex + ready bool +} + +func NewCache(b Bus, logger *zap.Logger) WorkerCache { + return &cache{ + b: b, + + cache: newMemoryCache(), + logger: logger.Sugar().Named("workercache"), + } +} + +func (c *cache) DownloadContracts(ctx context.Context) (contracts []api.ContractMetadata, err error) { + // fetch directly from bus if the cache is not ready + if !c.isReady() { + c.logger.Warn(errCacheNotReady) + contracts, err = c.b.Contracts(ctx, api.ContractsOpts{}) + return + } + + // fetch from bus if it's not cached or expired + value, found, expired := c.cache.Get(cacheKeyDownloadContracts) + if !found || expired { + contracts, err = c.b.Contracts(ctx, api.ContractsOpts{}) + if err == nil { + c.cache.Set(cacheKeyDownloadContracts, contracts) + } + if expired && !contractsEqual(value.([]api.ContractMetadata), contracts) { + c.logger.Warn(fmt.Errorf("%w: key %v", errCacheOutdated, cacheKeyDownloadContracts)) + } + return + } + + return value.([]api.ContractMetadata), nil +} + +func (c *cache) GougingParams(ctx context.Context) (gp api.GougingParams, err error) { + // fetch directly from bus if the cache is not ready + if !c.isReady() { + c.logger.Warn(errCacheNotReady) + gp, err = c.b.GougingParams(ctx) + return + } + + // fetch from bus if it's not cached or expired + value, found, expired := c.cache.Get(cacheKeyGougingParams) + if !found || expired { + gp, err = c.b.GougingParams(ctx) + if err == nil { + c.cache.Set(cacheKeyGougingParams, gp) + } + if expired && !gougingParamsEqual(value.(api.GougingParams), gp) { + c.logger.Warn(fmt.Errorf("%w: key %v", errCacheOutdated, cacheKeyGougingParams)) + } + return + } + + return value.(api.GougingParams), nil +} + +func (c *cache) HandleEvent(event webhooks.Event) (err error) { + log := c.logger.With("module", event.Module, "event", event.Event) + + // parse the event + 
parsed, err := api.ParseEventWebhook(event) + if err != nil { + log.Errorw("failed to parse event", "error", err) + return err + } + + // handle the event + switch e := parsed.(type) { + case api.EventConsensusUpdate: + log = log.With("bh", e.BlockHeight, "ts", e.Timestamp) + c.handleConsensusUpdate(e) + case api.EventContractArchive: + log = log.With("fcid", e.ContractID, "ts", e.Timestamp) + c.handleContractArchive(e) + case api.EventContractRenew: + log = log.With("fcid", e.Renewal.ID, "renewedFrom", e.Renewal.RenewedFrom, "ts", e.Timestamp) + c.handleContractRenew(e) + case api.EventSettingUpdate: + log = log.With("key", e.Key, "ts", e.Timestamp) + err = c.handleSettingUpdate(e) + case api.EventSettingDelete: + log = log.With("key", e.Key, "ts", e.Timestamp) + c.handleSettingDelete(e) + default: + log.Info("unhandled event", e) + return + } + + // log the outcome + if err != nil { + log.Errorw("failed to handle event", "error", err) + } else { + log.Info("handled event") + } + return +} + +func (c *cache) Initialize(ctx context.Context, workerAPI string, webhookOpts ...webhooks.HeaderOption) error { + eventsURL := fmt.Sprintf("%s/events", workerAPI) + headers := make(map[string]string) + for _, opt := range webhookOpts { + opt(headers) + } + for _, wh := range []webhooks.Webhook{ + api.WebhookConsensusUpdate(eventsURL, headers), + api.WebhookContractArchive(eventsURL, headers), + api.WebhookContractRenew(eventsURL, headers), + api.WebhookSettingUpdate(eventsURL, headers), + } { + if err := c.b.RegisterWebhook(ctx, wh); err != nil { + return fmt.Errorf("failed to register webhook '%s', err: %v", wh, err) + } + } + c.mu.Lock() + c.ready = true + c.mu.Unlock() + return nil +} + +func (c *cache) isReady() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.ready +} + +func (c *cache) handleConsensusUpdate(event api.EventConsensusUpdate) { + // return early if the doesn't have gouging params to update + value, found, _ := c.cache.Get(cacheKeyGougingParams) + if 
!found { + return + } + + // update gouging params + gp := value.(api.GougingParams) + gp.ConsensusState = event.ConsensusState + gp.TransactionFee = event.TransactionFee + c.cache.Set(cacheKeyGougingParams, gp) +} + +func (c *cache) handleContractArchive(event api.EventContractArchive) { + // return early if the cache doesn't have contracts + value, found, _ := c.cache.Get(cacheKeyDownloadContracts) + if !found { + return + } + contracts := value.([]api.ContractMetadata) + + // remove the contract from the cache + for i, contract := range contracts { + if contract.ID == event.ContractID { + contracts = append(contracts[:i], contracts[i+1:]...) + break + } + } + c.cache.Set(cacheKeyDownloadContracts, contracts) +} + +func (c *cache) handleContractRenew(event api.EventContractRenew) { + // return early if the cache doesn't have contracts + value, found, _ := c.cache.Get(cacheKeyDownloadContracts) + if !found { + return + } + contracts := value.([]api.ContractMetadata) + + // update the renewed contract in the cache + for i, contract := range contracts { + if contract.ID == event.Renewal.RenewedFrom { + contracts[i] = event.Renewal + break + } + } + + c.cache.Set(cacheKeyDownloadContracts, contracts) +} + +func (c *cache) handleSettingDelete(e api.EventSettingDelete) { + if e.Key == api.SettingGouging || e.Key == api.SettingRedundancy { + c.cache.Invalidate(cacheKeyGougingParams) + } +} + +func (c *cache) handleSettingUpdate(e api.EventSettingUpdate) (err error) { + // return early if the cache doesn't have gouging params to update + value, found, _ := c.cache.Get(cacheKeyGougingParams) + if !found { + return nil + } + gp := value.(api.GougingParams) + + // marshal the updated value + data, err := json.Marshal(e.Update) + if err != nil { + return fmt.Errorf("couldn't marshal the given value, error: %v", err) + } + + // unmarshal into the appropriated setting and update the cache + switch e.Key { + case api.SettingGouging: + var gs api.GougingSettings + if err := 
json.Unmarshal(data, &gs); err != nil { + return fmt.Errorf("couldn't update gouging settings, invalid request body, %t", e.Update) + } else if err := gs.Validate(); err != nil { + return fmt.Errorf("couldn't update gouging settings, error: %v", err) + } + + gp.GougingSettings = gs + c.cache.Set(cacheKeyGougingParams, gp) + case api.SettingRedundancy: + var rs api.RedundancySettings + if err := json.Unmarshal(data, &rs); err != nil { + return fmt.Errorf("couldn't update redundancy settings, invalid request body, %t", e.Update) + } else if err := rs.Validate(); err != nil { + return fmt.Errorf("couldn't update redundancy settings, error: %v", err) + } + + gp.RedundancySettings = rs + c.cache.Set(cacheKeyGougingParams, gp) + default: + } + + return nil +} + +func contractsEqual(x, y []api.ContractMetadata) bool { + if len(x) != len(y) { + return false + } + sort.Slice(x, func(i, j int) bool { return x[i].ID.String() < x[j].ID.String() }) + sort.Slice(y, func(i, j int) bool { return y[i].ID.String() < y[j].ID.String() }) + for i, c := range x { + if c.ID.String() != y[i].ID.String() { + return false + } + } + return true +} + +func gougingParamsEqual(x, y api.GougingParams) bool { + var xb bytes.Buffer + var yb bytes.Buffer + json.NewEncoder(&xb).Encode(x) + json.NewEncoder(&yb).Encode(y) + return bytes.Equal(xb.Bytes(), yb.Bytes()) +} diff --git a/internal/worker/cache_test.go b/internal/worker/cache_test.go new file mode 100644 index 000000000..e696ed02c --- /dev/null +++ b/internal/worker/cache_test.go @@ -0,0 +1,183 @@ +package worker + +import ( + "context" + "strings" + "testing" + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" + "go.sia.tech/renterd/webhooks" + + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +type mockBus struct { + contracts []api.ContractMetadata + gougingParams api.GougingParams +} + +func (m *mockBus) Contracts(ctx context.Context, opts api.ContractsOpts) 
([]api.ContractMetadata, error) { + return m.contracts, nil +} +func (m *mockBus) GougingParams(ctx context.Context) (api.GougingParams, error) { + return m.gougingParams, nil +} +func (m *mockBus) RegisterWebhook(ctx context.Context, wh webhooks.Webhook) error { + return nil +} + +func newMockBus() *mockBus { + return &mockBus{ + contracts: []api.ContractMetadata{ + testContractMetadata(1), + testContractMetadata(2), + testContractMetadata(3), + }, + gougingParams: api.GougingParams{ + RedundancySettings: test.RedundancySettings, + GougingSettings: test.GougingSettings, + TransactionFee: types.Siacoins(1), + ConsensusState: api.ConsensusState{ + BlockHeight: 1, + LastBlockTime: api.TimeRFC3339{}, + Synced: true, + }, + }, + } +} + +func TestWorkerCache(t *testing.T) { + // observe logs + observedZapCore, observedLogs := observer.New(zap.DebugLevel) + + // create mock bus and cache + c, b, mc := newTestCache(zap.New(observedZapCore)) + + // assert using cache before it's initialized prints a warning + contracts, err := c.DownloadContracts(context.Background()) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 3 { + t.Fatal("expected 3 contracts, got", len(contracts)) + } + gp, err := c.GougingParams(context.Background()) + if err != nil { + t.Fatal(err) + } else if gp.RedundancySettings != test.RedundancySettings { + t.Fatal("expected redundancy settings to match", gp.RedundancySettings, test.RedundancySettings) + } else if gp.GougingSettings != test.GougingSettings { + t.Fatal("expected gouging settings to match", gp.GougingSettings, test.GougingSettings) + } else if !gp.TransactionFee.Equals(types.Siacoins(1)) { + t.Fatal("expected transaction fee to match", gp.TransactionFee, types.Siacoins(1)) + } + + // assert warnings are printed when the cache is not ready yet + if logs := observedLogs.FilterLevelExact(zap.WarnLevel); logs.Len() != 2 { + t.Fatal("expected 2 warnings, got", logs.Len()) + } else if lines := observedLogs.TakeAll(); lines[0].Message 
!= lines[1].Message { + t.Fatal("expected same message, got", lines[0].Message, lines[1].Message) + } else if !strings.Contains(lines[0].Message, errCacheNotReady.Error()) { + t.Fatal("expected error message to contain 'cache is not ready yet', got", lines[0].Message) + } + + // initialize the cache + if err := c.Initialize(context.Background(), ""); err != nil { + t.Fatal(err) + } + + // fetch contracts & gouging params so they're cached + _, err = c.DownloadContracts(context.Background()) + if err != nil { + t.Fatal(err) + } + _, err = c.GougingParams(context.Background()) + if err != nil { + t.Fatal(err) + } + + // update bus contracts & expire cache entry manually + b.contracts = append(b.contracts, testContractMetadata(4)) + contracts, err = c.DownloadContracts(context.Background()) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 3 { + t.Fatal("expected 3 contracts, got", len(contracts)) + } + mc.mu.Lock() + mc.items[cacheKeyDownloadContracts].expiry = time.Now().Add(-1 * time.Minute) + mc.mu.Unlock() + + // fetch contracts again, assert we have 4 now and we printed a warning to indicate the cache entry was invalid + contracts, err = c.DownloadContracts(context.Background()) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 4 { + t.Fatal("expected 4 contracts, got", len(contracts)) + } else if logs := observedLogs.FilterLevelExact(zap.WarnLevel); logs.Len() != 1 { + t.Fatal("expected 1 warning, got", logs.Len(), logs.All()) + } else if lines := observedLogs.TakeAll(); !strings.Contains(lines[0].Message, errCacheOutdated.Error()) || !strings.Contains(lines[0].Message, cacheKeyDownloadContracts) { + t.Fatal("expected error message to contain 'cache is outdated', got", lines[0].Message) + } + + // update gouging params & expire cache entry manually + b.gougingParams.TransactionFee = b.gougingParams.TransactionFee.Mul64(2) + + // expire cache entry manually + mc.mu.Lock() + mc.items[cacheKeyGougingParams].expiry = time.Now().Add(-1 * 
time.Minute) + mc.mu.Unlock() + + // fetch contracts again, assert we have 4 now and we printed a warning to indicate the cache entry was invalid + gp, err = c.GougingParams(context.Background()) + if err != nil { + t.Fatal(err) + } else if !gp.TransactionFee.Equals(b.gougingParams.TransactionFee) { + t.Fatal("expected transaction fee to be updated, got", gp.TransactionFee) + } else if logs := observedLogs.FilterLevelExact(zap.WarnLevel); logs.Len() != 1 { + t.Fatal("expected 1 warning, got", logs.Len(), logs.All()) + } else if lines := observedLogs.TakeAll(); !strings.Contains(lines[0].Message, errCacheOutdated.Error()) || !strings.Contains(lines[0].Message, cacheKeyGougingParams) { + t.Fatal("expected error message to contain 'cache is outdated', got", lines[0].Message) + } + + // assert the worker cache handles every event + _ = observedLogs.TakeAll() // clear logs + for _, event := range []webhooks.Event{ + {Module: api.ModuleConsensus, Event: api.EventUpdate, Payload: nil}, + {Module: api.ModuleContract, Event: api.EventArchive, Payload: nil}, + {Module: api.ModuleContract, Event: api.EventRenew, Payload: nil}, + {Module: api.ModuleSetting, Event: api.EventUpdate, Payload: nil}, + {Module: api.ModuleSetting, Event: api.EventDelete, Payload: nil}, + } { + if err := c.HandleEvent(event); err != nil { + t.Fatal(err) + } + } + for _, entry := range observedLogs.TakeAll() { + if strings.Contains(entry.Message, "unhandled event") { + t.Fatal("expected no unhandled event, got", entry) + } + } +} + +func newTestCache(logger *zap.Logger) (WorkerCache, *mockBus, *memoryCache) { + b := newMockBus() + c := newMemoryCache() + return &cache{ + b: b, + cache: c, + logger: logger.Sugar(), + }, b, c +} + +func testContractMetadata(n int) api.ContractMetadata { + return api.ContractMetadata{ + ID: types.FileContractID{byte(n)}, + HostKey: types.PublicKey{byte(n)}, + WindowStart: 0, + WindowEnd: 10, + } +} diff --git a/object/object.go b/object/object.go index 
e8243fac1..95517619f 100644 --- a/object/object.go +++ b/object/object.go @@ -118,7 +118,7 @@ func GenerateEncryptionKey() EncryptionKey { // clean. type Object struct { Key EncryptionKey `json:"key,omitempty"` - Slabs []SlabSlice `json:"slabs,omitempty"` + Slabs SlabSlices `json:"slabs,omitempty"` } // NewObject returns a new Object with a random key. @@ -128,21 +128,8 @@ func NewObject(ec EncryptionKey) Object { } } -func (o Object) Contracts() map[types.PublicKey]map[types.FileContractID]struct{} { - usedContracts := make(map[types.PublicKey]map[types.FileContractID]struct{}) - for _, s := range o.Slabs { - contracts := ContractsFromShards(s.Shards) - for h, fcids := range contracts { - for fcid := range fcids { - if _, exists := usedContracts[h]; !exists { - usedContracts[h] = fcids - } else { - usedContracts[h][fcid] = struct{}{} - } - } - } - } - return usedContracts +func (o Object) Contracts() []types.FileContractID { + return o.Slabs.Contracts() } // TotalSize returns the total size of the object. diff --git a/object/slab.go b/object/slab.go index f2762abf3..770df9ef6 100644 --- a/object/slab.go +++ b/object/slab.go @@ -68,8 +68,20 @@ func ContractsFromShards(shards []Sector) map[types.PublicKey]map[types.FileCont return usedContracts } -func (s Slab) Contracts() map[types.PublicKey]map[types.FileContractID]struct{} { - return ContractsFromShards(s.Shards) +func (s Slab) Contracts() []types.FileContractID { + var usedContracts []types.FileContractID + added := make(map[types.FileContractID]struct{}) + for _, shard := range s.Shards { + for _, fcids := range shard.Contracts { + for _, fcid := range fcids { + if _, exists := added[fcid]; !exists { + usedContracts = append(usedContracts, fcid) + added[fcid] = struct{}{} + } + } + } + } + return usedContracts } // Length returns the length of the raw data stored in s. 
@@ -189,6 +201,26 @@ func (ss SlabSlice) Recover(w io.Writer, shards [][]byte) error { return stripedJoin(w, shards[:ss.MinShards], int(skip), int(ss.Length)) } +type SlabSlices []SlabSlice + +func (ss SlabSlices) Contracts() []types.FileContractID { + var usedContracts []types.FileContractID + added := make(map[types.FileContractID]struct{}) + for _, s := range ss { + for _, shard := range s.Shards { + for _, fcids := range shard.Contracts { + for _, fcid := range fcids { + if _, exists := added[fcid]; !exists { + added[fcid] = struct{}{} + usedContracts = append(usedContracts, fcid) + } + } + } + } + } + return usedContracts +} + // stripedSplit splits data into striped data shards, which must have sufficient // capacity. func stripedSplit(data []byte, dataShards [][]byte) { diff --git a/stores/accounts.go b/stores/accounts.go index 69f4aeff8..523ba2697 100644 --- a/stores/accounts.go +++ b/stores/accounts.go @@ -2,103 +2,34 @@ package stores import ( "context" - "math/big" - rhpv3 "go.sia.tech/core/rhp/v3" - "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "gorm.io/gorm/clause" + sql "go.sia.tech/renterd/stores/sql" ) -type ( - dbAccount struct { - Model - - // AccountID identifies an account. - AccountID publicKey `gorm:"unique;NOT NULL;size:32"` - - // CleanShutdown indicates whether the account was saved during a clean - // shutdown shutdown. - CleanShutdown bool `gorm:"default:false"` - - // Host describes the host the account was created with. - Host publicKey `gorm:"NOT NULL"` - - // Balance is the balance of the account. - Balance *balance - - // Drift is the accumulated delta between the bus' tracked balance for - // an account and the balance reported by a host. - Drift *balance - - // RequiresSync indicates whether an account needs to be synced with the - // host before it can be used again. 
- RequiresSync bool `gorm:"index"` - } -) - -func (dbAccount) TableName() string { - return "ephemeral_accounts" -} - -func (a dbAccount) convert() api.Account { - return api.Account{ - ID: rhpv3.Account(a.AccountID), - CleanShutdown: a.CleanShutdown, - HostKey: types.PublicKey(a.Host), - Balance: (*big.Int)(a.Balance), - Drift: (*big.Int)(a.Drift), - RequiresSync: a.RequiresSync, - } -} - // Accounts returns all accounts from the db. -func (s *SQLStore) Accounts(ctx context.Context) ([]api.Account, error) { - var dbAccounts []dbAccount - if err := s.db.WithContext(ctx).Find(&dbAccounts).Error; err != nil { - return nil, err - } - accounts := make([]api.Account, len(dbAccounts)) - for i, acc := range dbAccounts { - accounts[i] = acc.convert() - } - return accounts, nil +func (s *SQLStore) Accounts(ctx context.Context) (accounts []api.Account, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + accounts, err = tx.Accounts(ctx) + return err + }) + return } -// SetCleanShutdown sets the clean shutdown flag on the accounts to 'false' and -// also sets the 'requires_sync' flag. That way, the autopilot will know to sync -// all accounts after an unclean shutdown and the bus will know not to apply -// drift. +// SetUncleanShutdown sets the clean shutdown flag on the accounts to 'false' +// and also sets the 'requires_sync' flag. That way, the autopilot will know to +// sync all accounts after an unclean shutdown and the bus will know not to +// apply drift. func (s *SQLStore) SetUncleanShutdown(ctx context.Context) error { - return s.db. - WithContext(ctx). - Model(&dbAccount{}). - Where("TRUE"). - Updates(map[string]interface{}{ - "clean_shutdown": false, - "requires_sync": true, - }). - Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.SetUncleanShutdown(ctx) + }) } // SaveAccounts saves the given accounts in the db, overwriting any existing // ones. 
func (s *SQLStore) SaveAccounts(ctx context.Context, accounts []api.Account) error { - if len(accounts) == 0 { - return nil - } - dbAccounts := make([]dbAccount, len(accounts)) - for i, acc := range accounts { - dbAccounts[i] = dbAccount{ - AccountID: publicKey(acc.ID), - Host: publicKey(acc.HostKey), - Balance: (*balance)(acc.Balance), - Drift: (*balance)(acc.Drift), - RequiresSync: acc.RequiresSync, - } - } - return s.db.WithContext(ctx).Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "account_id"}}, - UpdateAll: true, - }).Create(&dbAccounts).Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.SaveAccounts(ctx, accounts) + }) } diff --git a/stores/autopilot.go b/stores/autopilot.go index 5a5c5ed2d..45b899576 100644 --- a/stores/autopilot.go +++ b/stores/autopilot.go @@ -5,64 +5,23 @@ import ( "errors" "go.sia.tech/renterd/api" - "gorm.io/gorm" - "gorm.io/gorm/clause" + sql "go.sia.tech/renterd/stores/sql" ) -type ( - dbAutopilot struct { - Model - - Identifier string `gorm:"unique;NOT NULL;"` - Config api.AutopilotConfig `gorm:"serializer:json"` - CurrentPeriod uint64 `gorm:"default:0"` - } -) - -// TableName implements the gorm.Tabler interface. -func (dbAutopilot) TableName() string { return "autopilots" } - -// convert converts a dbContract to a ContractMetadata. -func (c dbAutopilot) convert() api.Autopilot { - return api.Autopilot{ - ID: c.Identifier, - Config: c.Config, - CurrentPeriod: c.CurrentPeriod, - } +func (s *SQLStore) Autopilots(ctx context.Context) (aps []api.Autopilot, _ error) { + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + aps, err = tx.Autopilots(ctx) + return + }) + return aps, err } -func (s *SQLStore) Autopilots(ctx context.Context) ([]api.Autopilot, error) { - var entities []dbAutopilot - err := s.db. - WithContext(ctx). - Model(&dbAutopilot{}). - Find(&entities). 
- Error - if err != nil { - return nil, err - } - - autopilots := make([]api.Autopilot, len(entities)) - for i, ap := range entities { - autopilots[i] = ap.convert() - } - return autopilots, nil -} - -func (s *SQLStore) Autopilot(ctx context.Context, id string) (api.Autopilot, error) { - var entity dbAutopilot - err := s.db. - WithContext(ctx). - Model(&dbAutopilot{}). - Where("identifier = ?", id). - First(&entity). - Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return api.Autopilot{}, api.ErrAutopilotNotFound - } else if err != nil { - return api.Autopilot{}, err - } - return entity.convert(), nil +func (s *SQLStore) Autopilot(ctx context.Context, id string) (ap api.Autopilot, _ error) { + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + ap, err = tx.Autopilot(ctx, id) + return + }) + return ap, err } func (s *SQLStore) UpdateAutopilot(ctx context.Context, ap api.Autopilot) error { @@ -73,16 +32,7 @@ func (s *SQLStore) UpdateAutopilot(ctx context.Context, ap api.Autopilot) error if err := ap.Config.Validate(); err != nil { return err } - - // upsert - return s.db. - WithContext(ctx). 
- Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "identifier"}}, - UpdateAll: true, - }).Create(&dbAutopilot{ - Identifier: ap.ID, - Config: ap.Config, - CurrentPeriod: ap.CurrentPeriod, - }).Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateAutopilot(ctx, ap) + }) } diff --git a/stores/hostdb.go b/stores/hostdb.go index 0aa3ab0b2..9831db7a4 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -2,18 +2,17 @@ package stores import ( "context" - "database/sql" + dsql "database/sql" "errors" "fmt" "net" "strings" "time" - rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + sql "go.sia.tech/renterd/stores/sql" "go.sia.tech/siad/modules" "gorm.io/gorm" "gorm.io/gorm/clause" @@ -28,15 +27,9 @@ const ( // consensusInfoID defines the primary key of the entry in the consensusInfo // table. consensusInfoID = 1 - - // hostRetrievalBatchSize is the number of hosts we fetch from the - // database per batch. Empirically tested to verify that this is a value - // that performs reasonably well. 
- hostRetrievalBatchSize = 10000 ) var ( - ErrNegativeOffset = errors.New("offset can not be negative") ErrNegativeMaxDowntime = errors.New("max downtime can not be negative") ) @@ -53,7 +46,7 @@ type ( PublicKey publicKey `gorm:"unique;index;NOT NULL;size:32"` Settings hostSettings PriceTable hostPriceTable - PriceTableExpiry sql.NullTime + PriceTableExpiry dsql.NullTime TotalScans uint64 LastScan int64 `gorm:"index"` // unix nano @@ -75,6 +68,7 @@ type ( LastAnnouncement time.Time NetAddress string `gorm:"index"` + Subnets string Allowlist []dbAllowlistEntry `gorm:"many2many:host_allowlist_entry_hosts;constraint:OnDelete:CASCADE"` Blocklist []dbBlocklistEntry `gorm:"many2many:host_blocklist_entry_hosts;constraint:OnDelete:CASCADE"` @@ -87,7 +81,6 @@ type ( Model DBAutopilotID uint - DBAutopilot dbAutopilot DBHostID uint DBHost dbHost @@ -159,83 +152,6 @@ type ( } ) -// convert converts hostSettings to rhp.HostSettings -func (pt hostPriceTable) convert() rhpv3.HostPriceTable { - return rhpv3.HostPriceTable{ - UID: pt.UID, - Validity: pt.Validity, - HostBlockHeight: pt.HostBlockHeight, - UpdatePriceTableCost: pt.UpdatePriceTableCost, - AccountBalanceCost: pt.AccountBalanceCost, - FundAccountCost: pt.FundAccountCost, - LatestRevisionCost: pt.LatestRevisionCost, - SubscriptionMemoryCost: pt.SubscriptionMemoryCost, - SubscriptionNotificationCost: pt.SubscriptionNotificationCost, - InitBaseCost: pt.InitBaseCost, - MemoryTimeCost: pt.MemoryTimeCost, - DownloadBandwidthCost: pt.DownloadBandwidthCost, - UploadBandwidthCost: pt.UploadBandwidthCost, - DropSectorsBaseCost: pt.DropSectorsBaseCost, - DropSectorsUnitCost: pt.DropSectorsUnitCost, - HasSectorBaseCost: pt.HasSectorBaseCost, - ReadBaseCost: pt.ReadBaseCost, - ReadLengthCost: pt.ReadLengthCost, - RenewContractCost: pt.RenewContractCost, - RevisionBaseCost: pt.RevisionBaseCost, - SwapSectorBaseCost: pt.SwapSectorBaseCost, - WriteBaseCost: pt.WriteBaseCost, - WriteLengthCost: pt.WriteLengthCost, - WriteStoreCost: 
pt.WriteStoreCost, - TxnFeeMinRecommended: pt.TxnFeeMinRecommended, - TxnFeeMaxRecommended: pt.TxnFeeMaxRecommended, - ContractPrice: pt.ContractPrice, - CollateralCost: pt.CollateralCost, - MaxCollateral: pt.MaxCollateral, - MaxDuration: pt.MaxDuration, - WindowSize: pt.WindowSize, - RegistryEntriesLeft: pt.RegistryEntriesLeft, - RegistryEntriesTotal: pt.RegistryEntriesTotal, - } -} - -func convertHostPriceTable(pt rhpv3.HostPriceTable) hostPriceTable { - return hostPriceTable{ - UID: pt.UID, - Validity: pt.Validity, - HostBlockHeight: pt.HostBlockHeight, - UpdatePriceTableCost: pt.UpdatePriceTableCost, - AccountBalanceCost: pt.AccountBalanceCost, - FundAccountCost: pt.FundAccountCost, - LatestRevisionCost: pt.LatestRevisionCost, - SubscriptionMemoryCost: pt.SubscriptionMemoryCost, - SubscriptionNotificationCost: pt.SubscriptionNotificationCost, - InitBaseCost: pt.InitBaseCost, - MemoryTimeCost: pt.MemoryTimeCost, - DownloadBandwidthCost: pt.DownloadBandwidthCost, - UploadBandwidthCost: pt.UploadBandwidthCost, - DropSectorsBaseCost: pt.DropSectorsBaseCost, - DropSectorsUnitCost: pt.DropSectorsUnitCost, - HasSectorBaseCost: pt.HasSectorBaseCost, - ReadBaseCost: pt.ReadBaseCost, - ReadLengthCost: pt.ReadLengthCost, - RenewContractCost: pt.RenewContractCost, - RevisionBaseCost: pt.RevisionBaseCost, - SwapSectorBaseCost: pt.SwapSectorBaseCost, - WriteBaseCost: pt.WriteBaseCost, - WriteLengthCost: pt.WriteLengthCost, - WriteStoreCost: pt.WriteStoreCost, - TxnFeeMinRecommended: pt.TxnFeeMinRecommended, - TxnFeeMaxRecommended: pt.TxnFeeMaxRecommended, - ContractPrice: pt.ContractPrice, - CollateralCost: pt.CollateralCost, - MaxCollateral: pt.MaxCollateral, - MaxDuration: pt.MaxDuration, - WindowSize: pt.WindowSize, - RegistryEntriesLeft: pt.RegistryEntriesLeft, - RegistryEntriesTotal: pt.RegistryEntriesTotal, - } -} - // TableName implements the gorm.Tabler interface. 
func (dbAnnouncement) TableName() string { return "host_announcements" } @@ -254,75 +170,6 @@ func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } // TableName implements the gorm.Tabler interface. func (dbBlocklistEntry) TableName() string { return "host_blocklist_entries" } -// convert converts a host into a api.HostInfo -func (h dbHost) convert(blocked bool, storedData uint64) api.Host { - var lastScan time.Time - if h.LastScan > 0 { - lastScan = time.Unix(0, h.LastScan) - } - checks := make(map[string]api.HostCheck) - for _, check := range h.Checks { - checks[check.DBAutopilot.Identifier] = check.convert() - } - return api.Host{ - KnownSince: h.CreatedAt, - LastAnnouncement: h.LastAnnouncement, - NetAddress: h.NetAddress, - Interactions: api.HostInteractions{ - TotalScans: h.TotalScans, - LastScan: lastScan, - LastScanSuccess: h.LastScanSuccess, - SecondToLastScanSuccess: h.SecondToLastScanSuccess, - Uptime: h.Uptime, - Downtime: h.Downtime, - SuccessfulInteractions: h.SuccessfulInteractions, - FailedInteractions: h.FailedInteractions, - LostSectors: h.LostSectors, - }, - PriceTable: api.HostPriceTable{ - HostPriceTable: h.PriceTable.convert(), - Expiry: h.PriceTableExpiry.Time, - }, - PublicKey: types.PublicKey(h.PublicKey), - Scanned: h.Scanned, - Settings: rhpv2.HostSettings(h.Settings), - Blocked: blocked, - Checks: checks, - StoredData: storedData, - } -} - -func (hi dbHostCheck) convert() api.HostCheck { - return api.HostCheck{ - Gouging: api.HostGougingBreakdown{ - ContractErr: hi.GougingContractErr, - DownloadErr: hi.GougingDownloadErr, - GougingErr: hi.GougingGougingErr, - PruneErr: hi.GougingPruneErr, - UploadErr: hi.GougingUploadErr, - }, - Score: api.HostScoreBreakdown{ - Age: hi.ScoreAge, - Collateral: hi.ScoreCollateral, - Interactions: hi.ScoreInteractions, - StorageRemaining: hi.ScoreStorageRemaining, - Uptime: hi.ScoreUptime, - Version: hi.ScoreVersion, - Prices: hi.ScorePrices, - }, - Usability: 
api.HostUsabilityBreakdown{ - Blocked: hi.UsabilityBlocked, - Offline: hi.UsabilityOffline, - LowScore: hi.UsabilityLowScore, - RedundantIP: hi.UsabilityRedundantIP, - Gouging: hi.UsabilityGouging, - NotAcceptingContracts: hi.UsabilityNotAcceptingContracts, - NotAnnounced: hi.UsabilityNotAnnounced, - NotCompletingScan: hi.UsabilityNotCompletingScan, - }, - } -} - func (h *dbHost) BeforeCreate(tx *gorm.DB) (err error) { tx.Statement.AddClause(clause.OnConflict{ Columns: []clause.Column{{Name: "public_key"}}, @@ -440,175 +287,26 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host } func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, hc api.HostCheck) (err error) { - err = ss.retryTransaction(ctx, (func(tx *gorm.DB) error { - // fetch ap id - var apID uint - if err := tx. - Model(&dbAutopilot{}). - Where("identifier = ?", autopilotID). - Select("id"). - Take(&apID). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrAutopilotNotFound - } else if err != nil { - return err - } - - // fetch host id - var hID uint - if err := tx. - Model(&dbHost{}). - Where("public_key = ?", publicKey(hk)). - Select("id"). - Take(&hID). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrHostNotFound - } else if err != nil { - return err - } - - // update host info - return tx. - Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "db_autopilot_id"}, {Name: "db_host_id"}}, - UpdateAll: true, - }). 
- Create(&dbHostCheck{ - DBAutopilotID: apID, - DBHostID: hID, - - UsabilityBlocked: hc.Usability.Blocked, - UsabilityOffline: hc.Usability.Offline, - UsabilityLowScore: hc.Usability.LowScore, - UsabilityRedundantIP: hc.Usability.RedundantIP, - UsabilityGouging: hc.Usability.Gouging, - UsabilityNotAcceptingContracts: hc.Usability.NotAcceptingContracts, - UsabilityNotAnnounced: hc.Usability.NotAnnounced, - UsabilityNotCompletingScan: hc.Usability.NotCompletingScan, - - ScoreAge: hc.Score.Age, - ScoreCollateral: hc.Score.Collateral, - ScoreInteractions: hc.Score.Interactions, - ScoreStorageRemaining: hc.Score.StorageRemaining, - ScoreUptime: hc.Score.Uptime, - ScoreVersion: hc.Score.Version, - ScorePrices: hc.Score.Prices, - - GougingContractErr: hc.Gouging.ContractErr, - GougingDownloadErr: hc.Gouging.DownloadErr, - GougingGougingErr: hc.Gouging.GougingErr, - GougingPruneErr: hc.Gouging.PruneErr, - GougingUploadErr: hc.Gouging.UploadErr, - }). - Error - })) - return + return ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateHostCheck(ctx, autopilotID, hk, hc) + }) } // HostsForScanning returns the address of hosts for scanning. -func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { - if offset < 0 { - return nil, ErrNegativeOffset - } - - var hosts []struct { - PublicKey publicKey `gorm:"unique;index;NOT NULL"` - NetAddress string - } - var hostAddresses []api.HostAddress - - err := ss.db. - WithContext(ctx). - Model(&dbHost{}). - Where("last_scan < ?", maxLastScan.UnixNano()). - Offset(offset). - Limit(limit). - Order("last_scan ASC"). - FindInBatches(&hosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { - for _, h := range hosts { - hostAddresses = append(hostAddresses, api.HostAddress{ - PublicKey: types.PublicKey(h.PublicKey), - NetAddress: h.NetAddress, - }) - } - return nil - }). 
- Error - if err != nil { - return nil, err - } - return hostAddresses, err +func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) (hosts []api.HostAddress, err error) { + err = ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + hosts, err = tx.HostsForScanning(ctx, maxLastScan, offset, limit) + return err + }) + return } func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { - if offset < 0 { - return nil, ErrNegativeOffset - } - - // validate filterMode - switch filterMode { - case api.HostFilterModeAllowed: - case api.HostFilterModeBlocked: - case api.HostFilterModeAll: - default: - return nil, fmt.Errorf("invalid filter mode: %v", filterMode) - } - - // prepare query - query := ss.db. - Model(&dbHost{}). - Scopes( - autopilotFilter(autopilotID), - hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist()), - hostNetAddress(addressContains), - hostPublicKey(keyIn), - usabilityFilter(autopilotID, usabilityMode), - ) - - // preload allowlist and blocklist - if filterMode == api.HostFilterModeAll { - query = query. - Preload("Allowlist"). - Preload("Blocklist") - } - - // fetch stored data for each host - var storedData []struct { - HostID uint - StoredData uint64 - } - err := ss.db.Raw("SELECT host_id, SUM(size) as StoredData FROM contracts GROUP BY host_id"). - Scan(&storedData). - Error - if err != nil { - return nil, fmt.Errorf("failed to fetch stored data: %w", err) - } - storedDataMap := make(map[uint]uint64) - for _, host := range storedData { - storedDataMap[host.HostID] = host.StoredData - } - var hosts []api.Host - var fullHosts []dbHost - err = query. - Offset(offset). - Limit(limit). 
- FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { - for _, fh := range fullHosts { - var blocked bool - if filterMode == api.HostFilterModeAll { - blocked = ss.isBlocked(fh) - } else { - blocked = filterMode == api.HostFilterModeBlocked - } - hosts = append(hosts, fh.convert(blocked, storedDataMap[fh.ID])) - } - return nil - }). - Error - if err != nil { - return nil, err - } + err := ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + hosts, err = tx.SearchHosts(ctx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) + return + }) return hosts, err } @@ -622,62 +320,11 @@ func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures ui if maxDowntime < 0 { return 0, ErrNegativeMaxDowntime } - - // fetch all hosts outside of the transaction - var hosts []dbHost - if err := ss.db. - WithContext(ctx). - Model(&dbHost{}). - Where("recent_downtime >= ? AND recent_scan_failures >= ?", maxDowntime, minRecentFailures). - Find(&hosts). 
- Error; err != nil { - return 0, err - } - - // return early - if len(hosts) == 0 { - return 0, nil - } - - // remove every host one by one - var errs []error - for _, h := range hosts { - if err := ss.retryTransaction(ctx, func(tx *gorm.DB) error { - // fetch host contracts - hcs, err := contractsForHost(tx, h) - if err != nil { - return err - } - - // create map - toArchive := make(map[types.FileContractID]string) - for _, c := range hcs { - toArchive[types.FileContractID(c.FCID)] = api.ContractArchivalReasonHostPruned - } - - // archive host contracts - if err := archiveContracts(tx, hcs, toArchive); err != nil { - return err - } - - // remove the host - if err := tx.Delete(&h).Error; err != nil { - return err - } - removed++ - return nil - }); err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - var msgs []string - for _, err := range errs { - msgs = append(msgs, err.Error()) - } - err = errors.New(strings.Join(msgs, ";")) - } + err = ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + n, err := tx.RemoveOfflineHosts(ctx, minRecentFailures, maxDowntime) + removed = uint64(n) + return err + }) return } @@ -686,37 +333,8 @@ func (ss *SQLStore) UpdateHostAllowlistEntries(ctx context.Context, add, remove if len(add)+len(remove) == 0 && !clear { return nil } - defer ss.updateHasAllowlist(&err) - - // clear allowlist - if clear { - return ss.retryTransaction(ctx, func(tx *gorm.DB) error { - return tx.Where("TRUE").Delete(&dbAllowlistEntry{}).Error - }) - } - - var toInsert []dbAllowlistEntry - for _, entry := range add { - toInsert = append(toInsert, dbAllowlistEntry{Entry: publicKey(entry)}) - } - - toDelete := make([]publicKey, len(remove)) - for i, entry := range remove { - toDelete[i] = publicKey(entry) - } - - return ss.retryTransaction(ctx, func(tx *gorm.DB) error { - if len(toInsert) > 0 { - if err := tx.Create(&toInsert).Error; err != nil { - return err - } - } - if len(toDelete) > 0 { - if err := tx.Delete(&dbAllowlistEntry{}, 
"entry IN ?", toDelete).Error; err != nil { - return err - } - } - return nil + return ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateHostAllowlistEntries(ctx, add, remove, clear) }) } @@ -725,263 +343,36 @@ func (ss *SQLStore) UpdateHostBlocklistEntries(ctx context.Context, add, remove if len(add)+len(remove) == 0 && !clear { return nil } - defer ss.updateHasBlocklist(&err) - - // clear blocklist - if clear { - return ss.retryTransaction(ctx, func(tx *gorm.DB) error { - return tx.Where("TRUE").Delete(&dbBlocklistEntry{}).Error - }) - } - - var toInsert []dbBlocklistEntry - for _, entry := range add { - toInsert = append(toInsert, dbBlocklistEntry{Entry: entry}) - } - - return ss.retryTransaction(ctx, func(tx *gorm.DB) error { - if len(toInsert) > 0 { - if err := tx.Create(&toInsert).Error; err != nil { - return err - } - } - if len(remove) > 0 { - if err := tx.Delete(&dbBlocklistEntry{}, "entry IN ?", remove).Error; err != nil { - return err - } - } - return nil + return ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateHostBlocklistEntries(ctx, add, remove, clear) }) } func (ss *SQLStore) HostAllowlist(ctx context.Context) (allowlist []types.PublicKey, err error) { - var pubkeys []publicKey - err = ss.db. - WithContext(ctx). - Model(&dbAllowlistEntry{}). - Pluck("entry", &pubkeys). - Error - - for _, pubkey := range pubkeys { - allowlist = append(allowlist, types.PublicKey(pubkey)) - } + err = ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + allowlist, err = tx.HostAllowlist(ctx) + return err + }) return } func (ss *SQLStore) HostBlocklist(ctx context.Context) (blocklist []string, err error) { - err = ss.db. - WithContext(ctx). - Model(&dbBlocklistEntry{}). - Pluck("entry", &blocklist). 
- Error + err = ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + blocklist, err = tx.HostBlocklist(ctx) + return err + }) return } func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []api.HostScan) error { - if len(scans) == 0 { - return nil // nothing to do - } - - // Get keys from input. - keyMap := make(map[publicKey]struct{}) - var hks []publicKey - for _, scan := range scans { - if _, exists := keyMap[publicKey(scan.HostKey)]; !exists { - hks = append(hks, publicKey(scan.HostKey)) - keyMap[publicKey(scan.HostKey)] = struct{}{} - } - } - - // Fetch hosts for which to add scans. This can be done outsisde the - // transaction to reduce the time we spend in the transaction since we don't - // need it to be perfectly consistent. - var hosts []dbHost - for i := 0; i < len(hks); i += maxSQLVars { - end := i + maxSQLVars - if end > len(hks) { - end = len(hks) - } - var batchHosts []dbHost - if err := ss.db.WithContext(ctx).Where("public_key IN (?)", hks[i:end]). - Find(&batchHosts).Error; err != nil { - return err - } - hosts = append(hosts, batchHosts...) - } - hostMap := make(map[publicKey]dbHost) - for _, h := range hosts { - hostMap[h.PublicKey] = h - } - - // Write the interactions and update to the hosts atomically within a single - // transaction. - return ss.retryTransaction(ctx, func(tx *gorm.DB) error { - // Handle scans - for _, scan := range scans { - host, exists := hostMap[publicKey(scan.HostKey)] - if !exists { - continue // host doesn't exist - } - lastScan := time.Unix(0, host.LastScan) - - if scan.Success { - // Handle successful scan. 
- host.SuccessfulInteractions++ - if host.LastScan > 0 && lastScan.Before(scan.Timestamp) { - host.Uptime += scan.Timestamp.Sub(lastScan) - } - host.RecentDowntime = 0 - host.RecentScanFailures = 0 - - // overwrite the NetAddress in the settings with the one we - // received through the host announcement - scan.Settings.NetAddress = host.NetAddress - host.Settings = hostSettings(scan.Settings) - - // scans can only update the price table if the current - // pricetable is expired anyway, ensuring scans never - // overwrite a valid price table since the price table from - // scans are not paid for and thus not useful for anything - // aside from gouging checks - if time.Now().After(host.PriceTableExpiry.Time) { - host.PriceTable = convertHostPriceTable(scan.PriceTable) - host.PriceTableExpiry = sql.NullTime{ - Time: time.Now(), - Valid: true, - } - } - } else { - // Handle failed scan. - host.FailedInteractions++ - host.RecentScanFailures++ - if host.LastScan > 0 && lastScan.Before(scan.Timestamp) { - host.Downtime += scan.Timestamp.Sub(lastScan) - host.RecentDowntime += scan.Timestamp.Sub(lastScan) - } - } - - host.TotalScans++ - host.Scanned = host.Scanned || scan.Success - host.SecondToLastScanSuccess = host.LastScanSuccess - host.LastScanSuccess = scan.Success - host.LastScan = scan.Timestamp.UnixNano() - - // Save to map again. - hostMap[host.PublicKey] = host - } - - // Persist. - for _, h := range hostMap { - err := tx.Model(&dbHost{}). - Where("public_key", h.PublicKey). 
- Updates(map[string]interface{}{ - "scanned": h.Scanned, - "total_scans": h.TotalScans, - "second_to_last_scan_success": h.SecondToLastScanSuccess, - "last_scan_success": h.LastScanSuccess, - "recent_downtime": h.RecentDowntime, - "recent_scan_failures": h.RecentScanFailures, - "downtime": h.Downtime, - "uptime": h.Uptime, - "last_scan": h.LastScan, - "settings": h.Settings, - "price_table": h.PriceTable, - "price_table_expiry": h.PriceTableExpiry, - "successful_interactions": h.SuccessfulInteractions, - "failed_interactions": h.FailedInteractions, - }).Error - if err != nil { - return err - } - } - return nil + return ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.RecordHostScans(ctx, scans) }) } func (ss *SQLStore) RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error { - if len(priceTableUpdate) == 0 { - return nil // nothing to do - } - - // Get keys from input. - keyMap := make(map[publicKey]struct{}) - var hks []publicKey - for _, ptu := range priceTableUpdate { - if _, exists := keyMap[publicKey(ptu.HostKey)]; !exists { - hks = append(hks, publicKey(ptu.HostKey)) - keyMap[publicKey(ptu.HostKey)] = struct{}{} - } - } - - // Fetch hosts for which to add interactions. This can be done - // outsisde the transaction to reduce the time we spend in the - // transaction since we don't need it to be perfectly - // consistent. - var hosts []dbHost - for i := 0; i < len(hks); i += maxSQLVars { - end := i + maxSQLVars - if end > len(hks) { - end = len(hks) - } - var batchHosts []dbHost - if err := ss.db.WithContext(ctx).Where("public_key IN (?)", hks[i:end]). - Find(&batchHosts).Error; err != nil { - return err - } - hosts = append(hosts, batchHosts...) - } - hostMap := make(map[publicKey]dbHost) - for _, h := range hosts { - hostMap[h.PublicKey] = h - } - - // Write the interactions and update to the hosts atomically within a single - // transaction. 
- return ss.retryTransaction(ctx, func(tx *gorm.DB) error { - // Handle price table updates - for _, ptu := range priceTableUpdate { - host, exists := hostMap[publicKey(ptu.HostKey)] - if !exists { - continue // host doesn't exist - } - if ptu.Success { - // Handle successful update. - host.SuccessfulInteractions++ - host.RecentDowntime = 0 - host.RecentScanFailures = 0 - - // Update pricetable. - host.PriceTable = convertHostPriceTable(ptu.PriceTable.HostPriceTable) - host.PriceTableExpiry = sql.NullTime{ - Time: ptu.PriceTable.Expiry, - Valid: ptu.PriceTable.Expiry != time.Time{}, - } - } else { - // Handle failed update. - host.FailedInteractions++ - } - - // Save to map again. - hostMap[host.PublicKey] = host - } - - // Persist. - for _, h := range hostMap { - err := tx.Model(&dbHost{}). - Where("public_key", h.PublicKey). - Updates(map[string]interface{}{ - "recent_downtime": h.RecentDowntime, - "recent_scan_failures": h.RecentScanFailures, - "price_table": h.PriceTable, - "price_table_expiry": h.PriceTableExpiry, - "successful_interactions": h.SuccessfulInteractions, - "failed_interactions": h.FailedInteractions, - }).Error - if err != nil { - return err - } - } - return nil + return ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.RecordPriceTables(ctx, priceTableUpdate) }) } @@ -1012,104 +403,6 @@ func (ss *SQLStore) processConsensusChangeHostDB(cc modules.ConsensusChange) { ss.unappliedAnnouncements = append(ss.unappliedAnnouncements, newAnnouncements...) } -// hostNetAddress can be used as a scope to filter hosts by their net address. 
-func hostNetAddress(addressContains string) func(*gorm.DB) *gorm.DB { - return func(db *gorm.DB) *gorm.DB { - if addressContains != "" { - return db.Where("net_address LIKE ?", "%"+addressContains+"%") - } - return db - } -} - -func hostPublicKey(keyIn []types.PublicKey) func(*gorm.DB) *gorm.DB { - return func(db *gorm.DB) *gorm.DB { - if len(keyIn) > 0 { - pubKeys := make([]publicKey, len(keyIn)) - for i, pk := range keyIn { - pubKeys[i] = publicKey(pk) - } - return db.Where("public_key IN ?", pubKeys) - } - return db - } -} - -// autopilotFilter can be used as a scope to filter host checks based on their -// autopilot -func autopilotFilter(autopilotID string) func(*gorm.DB) *gorm.DB { - return func(db *gorm.DB) *gorm.DB { - if autopilotID == "" { - return db.Preload("Checks.DBAutopilot") - } - return db.Preload("Checks.DBAutopilot", "identifier = ?", autopilotID) - } -} - -// hostFilter can be used as a scope to filter hosts based on their filter mode, -// returning either all, allowed or blocked hosts. 
-func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool) func(*gorm.DB) *gorm.DB { - return func(db *gorm.DB) *gorm.DB { - switch filterMode { - case api.HostFilterModeAllowed: - if hasAllowlist { - db = db.Where("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if hasBlocklist { - db = db.Where("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - case api.HostFilterModeBlocked: - if hasAllowlist { - db = db.Where("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if hasBlocklist { - db = db.Where("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if !hasAllowlist && !hasBlocklist { - // if neither an allowlist nor a blocklist exist, all hosts are allowed - // which means we return none - db = db.Where("1 = 0") - } - case api.HostFilterModeAll: - // do nothing - } - return db - } -} - -func usabilityFilter(autopilotID, usabilityMode string) func(*gorm.DB) *gorm.DB { - return func(db *gorm.DB) *gorm.DB { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - db = db. - Joins("INNER JOIN host_checks hc on hc.db_host_id = hosts.id"). - Joins("INNER JOIN autopilots a on a.id = hc.db_autopilot_id AND a.identifier = ?", autopilotID). - Where("hc.usability_blocked = ? AND hc.usability_offline = ? AND hc.usability_low_score = ? AND hc.usability_redundant_ip = ? AND hc.usability_gouging = ? AND hc.usability_not_accepting_contracts = ? AND hc.usability_not_announced = ? AND hc.usability_not_completing_scan = ?", false, false, false, false, false, false, false, false) - case api.UsabilityFilterModeUnusable: - db = db. - Joins("INNER JOIN host_checks hc on hc.db_host_id = hosts.id"). - Joins("INNER JOIN autopilots a on a.id = hc.db_autopilot_id AND a.identifier = ?", autopilotID). - Where("hc.usability_blocked = ? OR hc.usability_offline = ? 
OR hc.usability_low_score = ? OR hc.usability_redundant_ip = ? OR hc.usability_gouging = ? OR hc.usability_not_accepting_contracts = ? OR hc.usability_not_announced = ? OR hc.usability_not_completing_scan = ?", true, true, true, true, true, true, true, true) - case api.UsabilityFilterModeAll: - // do nothing - } - return db - } -} - -func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { - ss.mu.Lock() - defer ss.mu.Unlock() - - if ss.allowListCnt > 0 && len(h.Allowlist) == 0 { - blocked = true - } - if ss.blockListCnt > 0 && len(h.Blocklist) > 0 { - blocked = true - } - return -} - func updateCCID(tx *gorm.DB, newCCID modules.ConsensusChangeID, newTip types.ChainIndex) error { return tx.Model(&dbConsensusInfo{}).Where(&dbConsensusInfo{ Model: Model{ @@ -1219,10 +512,7 @@ func updateBlocklist(tx *gorm.DB, hk types.PublicKey, allowlist []dbAllowlistEnt } func (s *SQLStore) ResetLostSectors(ctx context.Context, hk types.PublicKey) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - return tx.Model(&dbHost{}). - Where("public_key", publicKey(hk)). - Update("lost_sectors", 0). - Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.ResetLostSectors(ctx, hk) }) } diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index ca3c07130..d6195d9c9 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -12,9 +12,11 @@ import ( "github.com/google/go-cmp/cmp" "gitlab.com/NebulousLabs/encoding" rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + sql "go.sia.tech/renterd/stores/sql" "go.sia.tech/siad/crypto" "go.sia.tech/siad/modules" stypes "go.sia.tech/siad/types" @@ -125,10 +127,6 @@ func TestSQLHostDB(t *testing.T) { t.Fatal("known since not set") } - // Wait for the persist interval to pass to make sure an empty consensus - // change triggers a persist. - time.Sleep(testPersistInterval) - // Apply a consensus change. 
ccid2 := modules.ConsensusChangeID{1, 2, 3} ss.ProcessConsensusChange(modules.ConsensusChange{ @@ -136,6 +134,9 @@ func TestSQLHostDB(t *testing.T) { AppliedBlocks: []stypes.Block{{}}, AppliedDiffs: []modules.ConsensusChangeDiffs{{}}, }) + if err := ss.applyUpdates(true); err != nil { + t.Fatal(err) + } // Connect to the same DB again. hdb2 := ss.Reopen() @@ -188,7 +189,7 @@ func TestSQLHosts(t *testing.T) { if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, -1, -1); !errors.Is(err, sql.ErrNegativeOffset) { t.Fatal("unexpected error", err) } @@ -337,7 +338,7 @@ func TestSearchHosts(t *testing.T) { t.Fatal(err) } - // add host checks, h1 gets ap1 and h2 gets both, h3 gets none + // add host checks, h1 gets ap1 and h2 gets both h1c := newTestHostCheck() h1c.Score.Age = .1 err = ss.UpdateHostCheck(context.Background(), ap1, hk1, h1c) @@ -370,8 +371,8 @@ func TestSearchHosts(t *testing.T) { his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) - } else if cnt != 3 { - t.Fatal("unexpected", cnt) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) } // assert h1 and h2 have the expected checks @@ -387,8 +388,8 @@ func TestSearchHosts(t *testing.T) { his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) - } else if cnt != 3 { - t.Fatal("unexpected", cnt) + } else if len(his) != 3 { + t.Fatal("unexpected", len(his)) } // assert h1 and h2 have the expected checks @@ -484,6 +485,11 @@ func TestRecordScan(t *testing.T) { t.Fatal("mismatch") } + // The host shouldn't have any subnets. + if len(host.Subnets) != 0 { + t.Fatal("unexpected", host.Subnets, len(host.Subnets)) + } + // Fetch the host directly to get the creation time. 
h, err := hostByPubKey(ss.db, hk) if err != nil { @@ -495,13 +501,32 @@ func TestRecordScan(t *testing.T) { // Record a scan. firstScanTime := time.Now().UTC() + subnets := []string{"212.1.96.0/24", "38.135.51.0/24"} settings := rhpv2.HostSettings{NetAddress: "host.com"} - if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, firstScanTime, settings, true)}); err != nil { + pt := rhpv3.HostPriceTable{ + HostBlockHeight: 123, + } + if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, firstScanTime, settings, pt, true, subnets)}); err != nil { t.Fatal(err) } host, err = ss.Host(ctx, hk) if err != nil { t.Fatal(err) + } else if time.Now().Before(host.PriceTable.Expiry) { + t.Fatal("invalid expiry") + } else if host.PriceTable.HostBlockHeight != pt.HostBlockHeight { + t.Fatalf("mismatch %v %v", host.PriceTable.HostBlockHeight, pt.HostBlockHeight) + } + + // Update the price table expiry to be in the future. + _, err = ss.DB().Exec(ctx, "UPDATE hosts SET price_table_expiry = ? WHERE public_key = ?", time.Now().Add(time.Hour), sql.PublicKey(hk)) + if err != nil { + t.Fatal(err) + } + + // The host should have the subnets. + if !reflect.DeepEqual(host.Subnets, subnets) { + t.Fatal("mismatch") } // We expect no uptime or downtime from only a single scan. @@ -527,17 +552,22 @@ func TestRecordScan(t *testing.T) { t.Fatal("mismatch") } - // Record another scan 1 hour after the previous one. + // Record another scan 1 hour after the previous one. We don't pass any + // subnets this time. 
secondScanTime := firstScanTime.Add(time.Hour) - if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, secondScanTime, settings, true)}); err != nil { + pt.HostBlockHeight = 456 + if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, secondScanTime, settings, pt, true, nil)}); err != nil { t.Fatal(err) } host, err = ss.Host(ctx, hk) if err != nil { t.Fatal(err) - } - if host.Interactions.LastScan.UnixNano() != secondScanTime.UnixNano() { + } else if host.Interactions.LastScan.UnixNano() != secondScanTime.UnixNano() { t.Fatal("wrong time") + } else if time.Now().After(host.PriceTable.Expiry) { + t.Fatal("invalid expiry") + } else if host.PriceTable.HostBlockHeight != 123 { + t.Fatal("price table was updated") } host.Interactions.LastScan = time.Time{} uptime += secondScanTime.Sub(firstScanTime) @@ -554,9 +584,14 @@ func TestRecordScan(t *testing.T) { t.Fatal("mismatch") } + // The host should still have the subnets. + if !reflect.DeepEqual(host.Subnets, subnets) { + t.Fatal("mismatch") + } + // Record another scan 2 hours after the second one. This time it fails. 
thirdScanTime := secondScanTime.Add(2 * time.Hour) - if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, thirdScanTime, settings, false)}); err != nil { + if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, thirdScanTime, settings, pt, false, nil)}); err != nil { t.Fatal(err) } host, err = ss.Host(ctx, hk) @@ -612,10 +647,11 @@ func TestRemoveHosts(t *testing.T) { } now := time.Now().UTC() + pt := rhpv3.HostPriceTable{} t1 := now.Add(-time.Minute * 120) // 2 hours ago t2 := now.Add(-time.Minute * 90) // 1.5 hours ago (30min downtime) - hi1 := newTestScan(hk, t1, rhpv2.HostSettings{NetAddress: "host.com"}, false) - hi2 := newTestScan(hk, t2, rhpv2.HostSettings{NetAddress: "host.com"}, false) + hi1 := newTestScan(hk, t1, rhpv2.HostSettings{NetAddress: "host.com"}, pt, false, nil) + hi2 := newTestScan(hk, t2, rhpv2.HostSettings{NetAddress: "host.com"}, pt, false, nil) // record interactions if err := ss.RecordHostScans(context.Background(), []api.HostScan{hi1, hi2}); err != nil { @@ -645,7 +681,7 @@ func TestRemoveHosts(t *testing.T) { // record interactions t3 := now.Add(-time.Minute * 60) // 1 hour ago (60min downtime) - hi3 := newTestScan(hk, t3, rhpv2.HostSettings{NetAddress: "host.com"}, false) + hi3 := newTestScan(hk, t3, rhpv2.HostSettings{NetAddress: "host.com"}, pt, false, nil) if err := ss.RecordHostScans(context.Background(), []api.HostScan{hi3}); err != nil { t.Fatal(err) } @@ -1300,12 +1336,14 @@ func hostByPubKey(tx *gorm.DB, hostKey types.PublicKey) (dbHost, error) { } // newTestScan returns a host interaction with given parameters. 
-func newTestScan(hk types.PublicKey, scanTime time.Time, settings rhpv2.HostSettings, success bool) api.HostScan { +func newTestScan(hk types.PublicKey, scanTime time.Time, settings rhpv2.HostSettings, pt rhpv3.HostPriceTable, success bool, subnets []string) api.HostScan { return api.HostScan{ - HostKey: hk, - Success: success, - Timestamp: scanTime, - Settings: settings, + HostKey: hk, + PriceTable: pt, + Settings: settings, + Subnets: subnets, + Success: success, + Timestamp: scanTime, } } diff --git a/stores/metadata.go b/stores/metadata.go index e9846f305..56b92d1c4 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2,11 +2,9 @@ package stores import ( "context" - "encoding/json" "errors" "fmt" "math" - "regexp" "strings" "time" "unicode/utf8" @@ -16,6 +14,7 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" + sql "go.sia.tech/renterd/stores/sql" "go.sia.tech/siad/modules" "go.uber.org/zap" "gorm.io/gorm" @@ -23,10 +22,6 @@ import ( "lukechampine.com/frand" ) -var ( - pruneSlabsAlertID = frand.Entropy256() -) - const ( // batchDurationThreshold is the upper bound for the duration of a batch // operation on the database. As long as we are below the threshold, we @@ -42,17 +37,15 @@ const ( // upsert sectors. sectorInsertionBatchSize = 500 + // slabPruningBatchSize is the number of slabs per batch when we prune + // slabs. We limit this to 100 slabs which is 3000 sectors at default + // redundancy. 
+ slabPruningBatchSize = 100 + refreshHealthMinHealthValidity = 12 * time.Hour refreshHealthMaxHealthValidity = 72 * time.Hour ) -var ( - errInvalidNumberOfShards = errors.New("slab has invalid number of shards") - errShardRootChanged = errors.New("shard root changed") - - objectDeleteBatchSizes = []int64{10, 50, 100, 200, 500, 1000, 5000, 10000, 50000, 100000} -) - const ( contractStateInvalid contractState = iota contractStatePending @@ -61,6 +54,15 @@ const ( contractStateFailed ) +var ( + pruneSlabsAlertID = frand.Entropy256() + pruneDirsAlertID = frand.Entropy256() +) + +var ( + objectDeleteBatchSizes = []int64{10, 50, 100, 200, 500, 1000, 5000, 10000, 50000, 100000} +) + type ( contractState uint8 @@ -115,9 +117,18 @@ type ( Contracts []dbContract `gorm:"many2many:contract_set_contracts;constraint:OnDelete:CASCADE"` } + dbDirectory struct { + Model + + Name string + DBParentID uint + } + dbObject struct { Model + DBDirectoryID uint + DBBucketID uint `gorm:"index;uniqueIndex:idx_object_bucket;NOT NULL"` DBBucket dbBucket ObjectID string `gorm:"index;uniqueIndex:idx_object_bucket"` @@ -176,14 +187,6 @@ type ( Shards []dbSector `gorm:"constraint:OnDelete:CASCADE"` // CASCADE to delete shards too } - dbBufferedSlab struct { - Model - - DBSlab dbSlab - - Filename string - } - dbSector struct { Model @@ -241,12 +244,12 @@ type ( // rawObjectMetadata is used for hydrating object metadata. rawObjectMetadata struct { - ETag string - Health float64 - MimeType string - ModTime datetime - Name string - Size int64 + ETag string + Health float64 + MimeType string + ModTime datetime + ObjectName string + Size int64 } ) @@ -304,6 +307,9 @@ func (dbContractSector) TableName() string { return "contract_sectors" } // TableName implements the gorm.Tabler interface. func (dbContractSet) TableName() string { return "contract_sets" } +// TableName implements the gorm.Tabler interface. 
+func (dbDirectory) TableName() string { return "directories" } + // TableName implements the gorm.Tabler interface. func (dbObject) TableName() string { return "objects" } @@ -316,40 +322,9 @@ func (dbSector) TableName() string { return "sectors" } // TableName implements the gorm.Tabler interface. func (dbSlab) TableName() string { return "slabs" } -// TableName implements the gorm.Tabler interface. -func (dbBufferedSlab) TableName() string { return "buffered_slabs" } - // TableName implements the gorm.Tabler interface. func (dbSlice) TableName() string { return "slices" } -// convert converts a dbContract to an ArchivedContract. -func (c dbArchivedContract) convert() api.ArchivedContract { - var revisionNumber uint64 - _, _ = fmt.Sscan(c.RevisionNumber, &revisionNumber) - return api.ArchivedContract{ - ID: types.FileContractID(c.FCID), - HostKey: types.PublicKey(c.Host), - RenewedTo: types.FileContractID(c.RenewedTo), - - ProofHeight: c.ProofHeight, - RevisionHeight: c.RevisionHeight, - RevisionNumber: revisionNumber, - Size: c.Size, - StartHeight: c.StartHeight, - State: c.State.String(), - WindowStart: c.WindowStart, - WindowEnd: c.WindowEnd, - - Spending: api.ContractSpending{ - Uploads: types.Currency(c.UploadSpending), - Downloads: types.Currency(c.DownloadSpending), - FundAccount: types.Currency(c.FundAccountSpending), - Deletions: types.Currency(c.DeleteSpending), - SectorRoots: types.Currency(c.ListSpending), - }, - } -} - // convert converts a dbContract to a ContractMetadata. 
func (c dbContract) convert() api.ContractMetadata { var revisionNumber uint64 @@ -418,7 +393,7 @@ func (s dbSlab) convert() (slab object.Slab, err error) { func (raw rawObjectMetadata) convert() api.ObjectMetadata { return newObjectMetadata( - raw.Name, + raw.ObjectName, raw.ETag, raw.MimeType, raw.Health, @@ -484,226 +459,62 @@ func (raw rawObject) toSlabSlice() (slice object.SlabSlice, _ error) { return slice, nil } -func (s *SQLStore) Bucket(ctx context.Context, bucket string) (api.Bucket, error) { - var b dbBucket - err := s.db. - WithContext(ctx). - Model(&dbBucket{}). - Where("name = ?", bucket). - Take(&b). - Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return api.Bucket{}, api.ErrBucketNotFound - } else if err != nil { - return api.Bucket{}, err - } - return api.Bucket{ - CreatedAt: api.TimeRFC3339(b.CreatedAt.UTC()), - Name: b.Name, - Policy: b.Policy, - }, nil +func (s *SQLStore) Bucket(ctx context.Context, bucket string) (b api.Bucket, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + b, err = tx.Bucket(ctx, bucket) + return + }) + return } func (s *SQLStore) CreateBucket(ctx context.Context, bucket string, policy api.BucketPolicy) error { - // Create bucket. - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - res := tx.Clauses(clause.OnConflict{ - DoNothing: true, - }). - Create(&dbBucket{ - Name: bucket, - Policy: policy, - }) - if res.Error != nil { - return res.Error - } else if res.RowsAffected == 0 { - return api.ErrBucketExists - } - return nil + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.CreateBucket(ctx, bucket, policy) }) } func (s *SQLStore) UpdateBucketPolicy(ctx context.Context, bucket string, policy api.BucketPolicy) error { - b, err := json.Marshal(policy) - if err != nil { - return err - } - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - return tx. - Model(&dbBucket{}). - Where("name", bucket). 
- Updates(map[string]interface{}{ - "policy": string(b), - }, - ). - Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateBucketPolicy(ctx, bucket, policy) }) } func (s *SQLStore) DeleteBucket(ctx context.Context, bucket string) error { - // Delete bucket. - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - var b dbBucket - if err := tx.Take(&b, "name = ?", bucket).Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrBucketNotFound - } else if err != nil { - return err - } - var count int64 - if err := tx.Model(&dbObject{}).Where("db_bucket_id = ?", b.ID). - Limit(1). - Count(&count).Error; err != nil { - return err - } - if count > 0 { - return api.ErrBucketNotEmpty - } - res := tx.Delete(&b) - if res.Error != nil { - return res.Error - } - return nil + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.DeleteBucket(ctx, bucket) }) } -func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { - var buckets []dbBucket - err := s.db. - WithContext(ctx). - Model(&dbBucket{}). - Find(&buckets). - Error - if err != nil { - return nil, err - } - - resp := make([]api.Bucket, len(buckets)) - for i, b := range buckets { - resp[i] = api.Bucket{ - CreatedAt: api.TimeRFC3339(b.CreatedAt.UTC()), - Name: b.Name, - Policy: b.Policy, - } - } - return resp, nil +func (s *SQLStore) ListBuckets(ctx context.Context) (buckets []api.Bucket, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + buckets, err = tx.ListBuckets(ctx) + return + }) + return } // ObjectsStats returns some info related to the objects stored in the store. To // reduce locking and make sure all results are consistent, everything is done // within a single transaction. 
-func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { - db := s.db.WithContext(ctx) - - // fetch bucket id if a bucket was specified - var bucketID uint - if opts.Bucket != "" { - err := db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - } - - // number of objects - var objInfo struct { - NumObjects uint64 - MinHealth float64 - TotalObjectsSize uint64 - } - objInfoQuery := db. - Model(&dbObject{}). - Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize") - if opts.Bucket != "" { - objInfoQuery = objInfoQuery.Where("db_bucket_id", bucketID) - } - err := objInfoQuery.Scan(&objInfo).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - - // number of unfinished objects - var unfinishedObjects uint64 - unfinishedObjectsQuery := db. - Model(&dbMultipartUpload{}). - Select("COUNT(*)") - if opts.Bucket != "" { - unfinishedObjectsQuery = unfinishedObjectsQuery.Where("db_bucket_id", bucketID) - } - err = unfinishedObjectsQuery.Scan(&unfinishedObjects).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - - // size of unfinished objects - var totalUnfinishedObjectsSize uint64 - totalUnfinishedObjectsSizeQuery := db. - Model(&dbMultipartPart{}). - Joins("INNER JOIN multipart_uploads mu ON multipart_parts.db_multipart_upload_id = mu.id"). - Select("COALESCE(SUM(size), 0)") - if opts.Bucket != "" { - totalUnfinishedObjectsSizeQuery = totalUnfinishedObjectsSizeQuery.Where("db_bucket_id", bucketID) - } - err = totalUnfinishedObjectsSizeQuery.Scan(&totalUnfinishedObjectsSize).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - - var totalSectors int64 - totalSectorsQuery := db. - Table("slabs sla"). - Select("COALESCE(SUM(total_shards), 0)"). 
- Where("db_buffered_slab_id IS NULL") - - if opts.Bucket != "" { - totalSectorsQuery = totalSectorsQuery.Where(` - EXISTS ( - SELECT 1 FROM slices sli - INNER JOIN objects o ON o.id = sli.db_object_id AND o.db_bucket_id = ? - WHERE sli.db_slab_id = sla.id - ) - `, bucketID) - } - err = totalSectorsQuery.Scan(&totalSectors).Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - - var totalUploaded int64 - err = db. - Model(&dbContract{}). - Select("COALESCE(SUM(size), 0)"). - Scan(&totalUploaded). - Error - if err != nil { - return api.ObjectsStatsResponse{}, err - } - - return api.ObjectsStatsResponse{ - MinHealth: objInfo.MinHealth, - NumObjects: objInfo.NumObjects, - NumUnfinishedObjects: unfinishedObjects, - TotalUnfinishedObjectsSize: totalUnfinishedObjectsSize, - TotalObjectsSize: objInfo.TotalObjectsSize, - TotalSectorsSize: uint64(totalSectors) * rhpv2.SectorSize, - TotalUploadedSize: uint64(totalUploaded), - }, nil +func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (resp api.ObjectsStatsResponse, _ error) { + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + resp, err = tx.ObjectsStats(ctx, opts) + return + }) + return resp, err } func (s *SQLStore) SlabBuffers(ctx context.Context) ([]api.SlabBuffer, error) { - // Slab buffer info from the database. - var bufferedSlabs []dbBufferedSlab - err := s.db.Model(&dbBufferedSlab{}). - Joins("DBSlab"). - Joins("DBSlab.DBContractSet"). - Find(&bufferedSlabs). - Error + var err error + var fileNameToContractSet map[string]string + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + fileNameToContractSet, err = tx.SlabBuffers(ctx) + return err + }) if err != nil { - return nil, err - } - // Translate buffers to contract set. 
- fileNameToContractSet := make(map[string]string) - for _, slab := range bufferedSlabs { - fileNameToContractSet[slab.Filename] = slab.DBSlab.DBContractSet.Name + return nil, fmt.Errorf("failed to fetch slab buffers: %w", err) } + // Fetch in-memory buffer info and fill in contract set name. buffers := s.slabBufferMgr.SlabBuffers() for i := range buffers { @@ -730,83 +541,12 @@ func (s *SQLStore) AddContract(ctx context.Context, c rhpv2.ContractRevision, co } func (s *SQLStore) Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) { - db := s.db.WithContext(ctx) - - // helper to check whether a contract set exists - hasContractSet := func() error { - if opts.ContractSet == "" { - return nil - } - err := db.Where("name", opts.ContractSet).Take(&dbContractSet{}).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrContractSetNotFound - } - return err - } - - // fetch all contracts, their hosts and the contract set name - var rows []struct { - Contract dbContract `gorm:"embedded"` - Host dbHost `gorm:"embedded"` - Name string - } - tx := db - if opts.ContractSet == "" { - // no filter, use all contracts - tx = tx.Table("contracts") - } else { - // filter contracts by contract set first - tx = tx.Table("(?) contracts", db.Model(&dbContract{}). - Select("contracts.*"). - Joins("INNER JOIN hosts h ON h.id = contracts.host_id"). - Joins("INNER JOIN contract_set_contracts csc ON csc.db_contract_id = contracts.id"). - Joins("INNER JOIN contract_sets cs ON cs.id = csc.db_contract_set_id AND cs.name = ?", opts.ContractSet)) - } - err := tx. - Select("contracts.*, h.*, cs.name as Name"). - Joins("INNER JOIN hosts h ON h.id = contracts.host_id"). - Joins("LEFT JOIN contract_set_contracts csc ON csc.db_contract_id = contracts.id"). - Joins("LEFT JOIN contract_sets cs ON cs.id = csc.db_contract_set_id"). - Order("contracts.id ASC"). - Scan(&rows). 
- Error - if err != nil { - return nil, err - } else if len(rows) == 0 { - return nil, hasContractSet() - } - - // merge 'Host', 'Name' and 'Contract' into dbContracts - var dbContracts []dbContract - for i := range rows { - dbContract := rows[i].Contract - dbContract.Host = rows[i].Host - if rows[i].Name != "" { - dbContract.ContractSets = append(dbContract.ContractSets, dbContractSet{Name: rows[i].Name}) - } - dbContracts = append(dbContracts, dbContract) - } - - // merge contract sets var contracts []api.ContractMetadata - current, dbContracts := dbContracts[0], dbContracts[1:] - for { - if len(dbContracts) == 0 { - contracts = append(contracts, current.convert()) - break - } else if current.ID != dbContracts[0].ID { - contracts = append(contracts, current.convert()) - } else if len(dbContracts[0].ContractSets) > 0 { - current.ContractSets = append(current.ContractSets, dbContracts[0].ContractSets...) - } - current, dbContracts = dbContracts[0], dbContracts[1:] - } - - // if no contracts are left, check if the set existed in the first place - if len(contracts) == 0 { - return nil, hasContractSet() - } - return contracts, nil + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + contracts, err = tx.Contracts(ctx, opts) + return + }) + return contracts, err } // AddRenewedContract adds a new contract which was created as the result of a renewal to the store. @@ -847,6 +587,9 @@ func (s *SQLStore) AddRenewedContract(ctx context.Context, c rhpv2.ContractRevis return err } + // Populate host. 
+ newContract.Host = oldContract.Host + s.addKnownContract(c.ID()) renewed = newContract return nil @@ -857,19 +600,12 @@ func (s *SQLStore) AddRenewedContract(ctx context.Context, c rhpv2.ContractRevis return renewed.convert(), nil } -func (s *SQLStore) AncestorContracts(ctx context.Context, id types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) { - var ancestors []dbArchivedContract - err := s.db.WithContext(ctx).Raw("WITH RECURSIVE ancestors AS (SELECT * FROM archived_contracts WHERE renewed_to = ? UNION ALL SELECT archived_contracts.* FROM ancestors, archived_contracts WHERE archived_contracts.renewed_to = ancestors.fcid) SELECT * FROM ancestors WHERE start_height >= ?", fileContractID(id), startHeight). - Scan(&ancestors). - Error - if err != nil { - return nil, err - } - contracts := make([]api.ArchivedContract, len(ancestors)) - for i, ancestor := range ancestors { - contracts[i] = ancestor.convert() - } - return contracts, nil +func (s *SQLStore) AncestorContracts(ctx context.Context, id types.FileContractID, startHeight uint64) (ancestors []api.ArchivedContract, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + ancestors, err = tx.AncestorContracts(ctx, id, startHeight) + return err + }) + return } func (s *SQLStore) ArchiveContract(ctx context.Context, id types.FileContractID, reason string) error { @@ -877,45 +613,41 @@ func (s *SQLStore) ArchiveContract(ctx context.Context, id types.FileContractID, } func (s *SQLStore) ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error { - // fetch ids - var ids []types.FileContractID - for id := range toArchive { - ids = append(ids, id) - } - - // fetch contracts - cs, err := contracts(s.db, ids) - if err != nil { - return err + // archive contracts one-by-one to avoid overwhelming the database due to + // the cascade deletion of contract-sectors. 
+ var errs []string + for fcid, reason := range toArchive { + // invalidate health of related sectors before archiving the contract + // NOTE: even if this is not done in the same transaction it won't have any + // lasting negative effects. + if err := s.invalidateSlabHealthByFCID(ctx, []types.FileContractID{fcid}); err != nil { + return fmt.Errorf("ArchiveContracts: failed to invalidate slab health: %w", err) + } + + // archive the contract but don't interrupt the process if one contract + // fails + if err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.ArchiveContract(ctx, fcid, reason) + }); err != nil { + errs = append(errs, fmt.Sprintf("%v: %v", fcid, err)) + continue + } } - - // archive them - if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - return archiveContracts(tx, cs, toArchive) - }); err != nil { - return err + if len(errs) > 0 { + return fmt.Errorf("ArchiveContracts: failed to archive at least one contract: %v", strings.Join(errs, "; ")) } - return nil } func (s *SQLStore) ArchiveAllContracts(ctx context.Context, reason string) error { - // fetch contract ids - var fcids []fileContractID - if err := s.db. - WithContext(ctx). - Model(&dbContract{}). - Pluck("fcid", &fcids). - Error; err != nil { - return err + contracts, err := s.Contracts(ctx, api.ContractsOpts{}) + if err != nil { + return fmt.Errorf("failed to fetch contracts: %w", err) } - - // create map toArchive := make(map[types.FileContractID]string) - for _, fcid := range fcids { - toArchive[types.FileContractID(fcid)] = reason + for _, c := range contracts { + toArchive[c.ID] = reason } - return s.ArchiveContracts(ctx, toArchive) } @@ -932,107 +664,38 @@ func (s *SQLStore) ContractRoots(ctx context.Context, id types.FileContractID) ( return nil, api.ErrContractNotFound } - var dbRoots []hash256 - if err = s.db. - WithContext(ctx). 
- Raw(` -SELECT sec.root -FROM contracts c -INNER JOIN contract_sectors cs ON cs.db_contract_id = c.id -INNER JOIN sectors sec ON cs.db_sector_id = sec.id -WHERE c.fcid = ? -`, fileContractID(id)). - Scan(&dbRoots). - Error; err == nil { - for _, r := range dbRoots { - roots = append(roots, *(*types.Hash256)(&r)) - } - } + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + roots, err = tx.ContractRoots(ctx, id) + return err + }) return } -func (s *SQLStore) ContractSets(ctx context.Context) ([]string, error) { - var sets []string - err := s.db.WithContext(ctx).Raw("SELECT name FROM contract_sets"). - Scan(&sets). - Error +func (s *SQLStore) ContractSets(ctx context.Context) (sets []string, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + sets, err = tx.ContractSets(ctx) + return err + }) return sets, err } -func (s *SQLStore) ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error) { - type size struct { - Fcid fileContractID `json:"fcid"` - Size uint64 `json:"size"` - Prunable uint64 `json:"prunable"` - } - - var nullContracts []size - var dataContracts []size - if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - // first, we fetch all contracts without sectors and consider their - // entire size as prunable - if err := tx. - Raw(` -SELECT c.fcid, c.size, c.size as prunable FROM contracts c WHERE NOT EXISTS (SELECT 1 FROM contract_sectors cs WHERE cs.db_contract_id = c.id)`). - Scan(&nullContracts). - Error; err != nil { - return err - } - - // second, we fetch how much data can be pruned from all contracts that - // do have sectors, we take a two-step approach because it allows us to - // use an INNER JOIN on contract_sectors, drastically improving the - // performance of the query - return tx. 
- Raw(` -SELECT fcid, contract_size as size, CASE WHEN contract_size > sector_size THEN contract_size - sector_size ELSE 0 END as prunable FROM ( -SELECT c.fcid, MAX(c.size) as contract_size, COUNT(cs.db_sector_id) * ? as sector_size FROM contracts c INNER JOIN contract_sectors cs ON cs.db_contract_id = c.id GROUP BY c.fcid -) i`, rhpv2.SectorSize). - Scan(&dataContracts). - Error - }); err != nil { - return nil, err - } - - sizes := make(map[types.FileContractID]api.ContractSize) - for _, row := range append(nullContracts, dataContracts...) { - if types.FileContractID(row.Fcid) == (types.FileContractID{}) { - return nil, errors.New("invalid file contract id") - } - sizes[types.FileContractID(row.Fcid)] = api.ContractSize{ - Size: row.Size, - Prunable: row.Prunable, - } - } - return sizes, nil +func (s *SQLStore) ContractSizes(ctx context.Context) (sizes map[types.FileContractID]api.ContractSize, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + sizes, err = tx.ContractSizes(ctx) + return err + }) + return } -func (s *SQLStore) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) { +func (s *SQLStore) ContractSize(ctx context.Context, id types.FileContractID) (cs api.ContractSize, err error) { if !s.isKnownContract(id) { return api.ContractSize{}, api.ErrContractNotFound } - - var size struct { - Size uint64 `json:"size"` - Prunable uint64 `json:"prunable"` - } - - if err := s.db. - WithContext(ctx). - Raw(` -SELECT contract_size as size, CASE WHEN contract_size > sector_size THEN contract_size - sector_size ELSE 0 END as prunable FROM ( -SELECT MAX(c.size) as contract_size, COUNT(cs.db_sector_id) * ? as sector_size FROM contracts c LEFT JOIN contract_sectors cs ON cs.db_contract_id = c.id WHERE c.fcid = ? -) i -`, rhpv2.SectorSize, fileContractID(id)). - Take(&size). 
- Error; err != nil { - return api.ContractSize{}, err - } - - return api.ContractSize{ - Size: size.Size, - Prunable: size.Prunable, - }, nil + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + cs, err = tx.ContractSize(ctx, id) + return + }) + return cs, err } func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds []types.FileContractID) error { @@ -1043,7 +706,7 @@ func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds wanted[fileContractID(fcid)] = struct{}{} } - var diff []fileContractID + var diff []types.FileContractID var nContractsAfter int err := s.retryTransaction(ctx, func(tx *gorm.DB) error { // fetch contract set @@ -1072,14 +735,14 @@ func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds // add removals to the diff for _, contract := range cs.Contracts { if _, ok := wanted[contract.FCID]; !ok { - diff = append(diff, contract.FCID) + diff = append(diff, types.FileContractID(contract.FCID)) } delete(wanted, contract.FCID) } // add additions to the diff for fcid := range wanted { - diff = append(diff, fcid) + diff = append(diff, types.FileContractID(fcid)) } // update the association @@ -1112,28 +775,17 @@ func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds } func (s *SQLStore) RemoveContractSet(ctx context.Context, name string) error { - return s.db. - WithContext(ctx). - Where(dbContractSet{Name: name}). - Delete(&dbContractSet{}). - Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.RemoveContractSet(ctx, name) + }) } -func (s *SQLStore) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (_ api.ContractMetadata, err error) { - var contract dbContract - - err = s.db. - WithContext(ctx). - Where(&dbContract{ContractCommon: ContractCommon{RenewedFrom: fileContractID(renewedFrom)}}). - Joins("Host"). - Take(&contract). 
- Error - if errors.Is(err, gorm.ErrRecordNotFound) { - err = api.ErrContractNotFound - return - } - - return contract.convert(), nil +func (s *SQLStore) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (cm api.ContractMetadata, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + cm, err = tx.RenewedContract(ctx, renewedFrom) + return err + }) + return } func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) { @@ -1161,11 +813,6 @@ func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, return objects, nil } -func replaceAnyValue(query string) string { - re := regexp.MustCompile(`ANY_VALUE\((.*?)\)`) - return re.ReplaceAllString(query, "$1") -} - func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) (metadata []api.ObjectMetadata, hasMore bool, err error) { // sanity check we are passing a directory if !strings.HasSuffix(path, "/") { @@ -1216,65 +863,76 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort offset = 0 } - indexHint := "" - if !isSQLite(s.db) { - indexHint = "USE INDEX (idx_object_bucket, idx_objects_created_at)" + // fetch id of directory to query + dirID, err := s.dirID(s.db, path) + if errors.Is(err, gorm.ErrRecordNotFound) { + return []api.ObjectMetadata{}, false, nil + } else if err != nil { + return nil, false, err } - onameExpr := fmt.Sprintf("CASE INSTR(SUBSTR(object_id, ?), '/') WHEN 0 THEN %s ELSE %s END", - sqlConcat(s.db, "?", "SUBSTR(object_id, ?)"), - sqlConcat(s.db, "?", "substr(SUBSTR(object_id, ?), 1, INSTR(SUBSTR(object_id, ?), '/'))"), - ) + // fetch bucket id + var dBucket dbBucket + if err := s.db.Select("id"). + Where("name", bucket). 
+ Take(&dBucket).Error; err != nil { + return nil, false, fmt.Errorf("failed to fetch bucket id: %w", err) + } - // build objects query & parameters - objectsQuery := fmt.Sprintf(` -SELECT ETag, ModTime, oname as Name, Size, Health, MimeType -FROM ( - SELECT - ANY_VALUE(etag) AS ETag, - MAX(objects.created_at) AS ModTime, - %s AS oname, - SUM(size) AS Size, - MIN(health) as Health, - ANY_VALUE(mime_type) as MimeType - FROM objects %s - INNER JOIN buckets b ON objects.db_bucket_id = b.id - WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND b.name = ? AND SUBSTR(%s, 1, ?) = ? AND %s != ? - GROUP BY oname -) baseQuery -`, - onameExpr, - indexHint, - onameExpr, - onameExpr, - ) + // build prefix expression + prefixExpr := "TRUE" + if prefix != "" { + prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" + } + lengthFn := "CHAR_LENGTH" if isSQLite(s.db) { - objectsQuery = replaceAnyValue(objectsQuery) + lengthFn = "LENGTH" } - objectsQueryParams := []interface{}{ - utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - - path + "%", - - utf8.RuneCountInString(path), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - bucket, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - - utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - - utf8.RuneCountInString(path + prefix), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - path + prefix, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - path, // WHERE SUBSTR(%s, 1, ?) 
= ? AND %s != ? AND b.name = ? - } + // objectsQuery consists of 2 parts + // 1. fetch all objects in requested directory + // 2. fetch all sub-directories + objectsQuery := fmt.Sprintf(` +SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType +FROM objects o +WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND %s +UNION ALL +SELECT '' as ETag, MAX(o.created_at) as ModTime, d.name as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +FROM objects o +INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name AND %s +WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) +AND o.object_id LIKE ? +AND SUBSTR(o.object_id, 1, ?) = ? +AND d.db_parent_id = ? +GROUP BY d.id +`, prefixExpr, + lengthFn, + prefixExpr) + + // build query params + var objectsQueryParams []interface{} + if prefix != "" { + objectsQueryParams = []interface{}{ + path, // o.object_id != ? + dirID, bucket, // o.db_directory_id = ? AND b.name = ? + utf8.RuneCountInString(path + prefix), path + prefix, + utf8.RuneCountInString(path + prefix), path + prefix, + bucket, // b.name = ? + path + "%", // o.object_id LIKE ? + utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? + dirID, // d.db_parent_id = ? + } + } else { + objectsQueryParams = []interface{}{ + path, // o.object_id != ? + dirID, bucket, // o.db_directory_id = ? AND b.name = ? + bucket, + path + "%", // o.object_id LIKE ? + utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? + dirID, // d.db_parent_id = ? + } + } // build marker expr markerExpr := "1 = 1" @@ -1285,41 +943,41 @@ FROM ( var markerHealth float64 if err = s.db. WithContext(ctx). - Raw(fmt.Sprintf(`SELECT Health FROM (%s WHERE oname >= ? ORDER BY oname LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). 
+ Raw(fmt.Sprintf(`SELECT Health FROM (SELECT * FROM (%s) h WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerHealth). Error; err != nil { return } if sortDir == api.ObjectSortDirAsc { - markerExpr = "(Health > ? OR (Health = ? AND Name > ?))" + markerExpr = "(Health > ? OR (Health = ? AND ObjectName > ?))" markerParams = []interface{}{markerHealth, markerHealth, marker} } else { - markerExpr = "(Health = ? AND Name > ?) OR Health < ?" + markerExpr = "(Health = ? AND ObjectName > ?) OR Health < ?" markerParams = []interface{}{markerHealth, marker, markerHealth} } case api.ObjectSortBySize: var markerSize float64 if err = s.db. WithContext(ctx). - Raw(fmt.Sprintf(`SELECT Size FROM (%s WHERE oname >= ? ORDER BY oname LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). + Raw(fmt.Sprintf(`SELECT Size FROM (SELECT * FROM (%s) s WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerSize). Error; err != nil { return } if sortDir == api.ObjectSortDirAsc { - markerExpr = "(Size > ? OR (Size = ? AND Name > ?))" + markerExpr = "(Size > ? OR (Size = ? AND ObjectName > ?))" markerParams = []interface{}{markerSize, markerSize, marker} } else { - markerExpr = "(Size = ? AND Name > ?) OR Size < ?" + markerExpr = "(Size = ? AND ObjectName > ?) OR Size < ?" markerParams = []interface{}{markerSize, marker, markerSize} } case api.ObjectSortByName: if sortDir == api.ObjectSortDirAsc { - markerExpr = "Name > ?" + markerExpr = "ObjectName > ?" } else { - markerExpr = "Name < ?" + markerExpr = "ObjectName < ?" 
} markerParams = []interface{}{marker} default: @@ -1328,9 +986,12 @@ FROM ( } // build order clause + if sortBy == api.ObjectSortByName { + sortBy = "ObjectName" + } orderByClause := fmt.Sprintf("%s %s", sortBy, sortDir) - if sortBy != api.ObjectSortByName { - orderByClause += ", Name" + if sortBy != "ObjectName" { + orderByClause += ", ObjectName" } var rows []rawObjectMetadata @@ -1363,7 +1024,7 @@ FROM ( } func (s *SQLStore) Object(ctx context.Context, bucket, path string) (obj api.Object, err error) { - err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { obj, err = s.object(tx, bucket, path) return err }) @@ -1498,62 +1159,38 @@ func fetchUsedContracts(tx *gorm.DB, usedContractsByHost map[types.PublicKey]map usedContracts[types.FileContractID(c.RenewedFrom)] = c } } - return usedContracts, nil } func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew string, force bool) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - if force { - // delete potentially existing object at destination - if _, err := s.deleteObject(tx, bucket, keyNew); err != nil { - return fmt.Errorf("RenameObject: failed to delete object: %w", err) - } - } - tx = tx.Exec(`UPDATE objects SET object_id = ? WHERE object_id = ? 
AND ?`, keyNew, keyOld, sqlWhereBucket("objects", bucket)) - if tx.Error != nil && - (strings.Contains(tx.Error.Error(), "UNIQUE constraint failed") || strings.Contains(tx.Error.Error(), "Duplicate entry")) { - return api.ErrObjectExists - } else if tx.Error != nil { - return tx.Error + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + // create new dir + dirID, err := tx.MakeDirsForPath(ctx, keyNew) + if err != nil { + return err } - if tx.RowsAffected == 0 { - return fmt.Errorf("%w: key %v", api.ErrObjectNotFound, keyOld) + // update object + err = tx.RenameObject(ctx, bucket, keyOld, keyNew, dirID, force) + if err != nil { + return err } + // delete old dir if empty + s.triggerSlabPruning() return nil }) } func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixNew string, force bool) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - if force { - // delete potentially existing objects at destination - inner := tx.Raw("SELECT ? FROM objects WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND ?", - gorm.Expr(sqlConcat(tx, "?", "SUBSTR(object_id, ?)")), prefixNew, - utf8.RuneCountInString(prefixOld)+1, prefixOld+"%", - utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) - - if !isSQLite(tx) { - inner = tx.Raw("SELECT * FROM (?) as i", inner) - } - resp := tx.Model(&dbObject{}). - Where("object_id IN (?)", inner). - Delete(&dbObject{}) - if err := resp.Error; err != nil { - return err - } - } - tx = tx.Exec("UPDATE objects SET object_id = "+sqlConcat(tx, "?", "SUBSTR(object_id, ?)")+" WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? 
AND ?", - prefixNew, utf8.RuneCountInString(prefixOld)+1, prefixOld+"%", utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) - if tx.Error != nil && - (strings.Contains(tx.Error.Error(), "UNIQUE constraint failed") || strings.Contains(tx.Error.Error(), "Duplicate entry")) { - return api.ErrObjectExists - } else if tx.Error != nil { - return tx.Error - } - if tx.RowsAffected == 0 { - return fmt.Errorf("%w: prefix %v", api.ErrObjectNotFound, prefixOld) + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + // create new dir + dirID, err := tx.MakeDirsForPath(ctx, prefixNew) + if err != nil { + return fmt.Errorf("RenameObjects: failed to create new directory: %w", err) + } else if err := tx.RenameObjects(ctx, bucket, prefixOld, prefixNew, dirID, force); err != nil { + return err } + // prune old dirs + s.triggerSlabPruning() return nil }) } @@ -1571,161 +1208,46 @@ func (s *SQLStore) AddPartialSlab(ctx context.Context, data []byte, minShards, t } func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (om api.ObjectMetadata, err error) { - err = s.retryTransaction(ctx, func(tx *gorm.DB) error { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { if srcBucket != dstBucket || srcPath != dstPath { - _, err = s.deleteObject(tx, dstBucket, dstPath) + _, err = tx.DeleteObject(ctx, dstBucket, dstPath) if err != nil { return fmt.Errorf("CopyObject: failed to delete object: %w", err) } } - - var srcObj dbObject - err = tx.Where("objects.object_id = ? AND DBBucket.name = ?", srcPath, srcBucket). - Joins("DBBucket"). - Take(&srcObj). - Error - if err != nil { - return fmt.Errorf("failed to fetch src object: %w", err) - } - - if srcBucket == dstBucket && srcPath == dstPath { - // No copying is happening. We just update the metadata on the src - // object. 
- srcObj.MimeType = mimeType - om = newObjectMetadata( - srcObj.ObjectID, - srcObj.Etag, - srcObj.MimeType, - srcObj.Health, - srcObj.CreatedAt, - srcObj.Size, - ) - if err := s.updateUserMetadata(tx, srcObj.ID, metadata); err != nil { - return fmt.Errorf("failed to update user metadata: %w", err) - } - return tx.Save(&srcObj).Error - } - - var srcSlices []dbSlice - err = tx.Where("db_object_id = ?", srcObj.ID). - Find(&srcSlices). - Error - if err != nil { - return fmt.Errorf("failed to fetch src slices: %w", err) - } - for i := range srcSlices { - srcSlices[i].Model = Model{} // clear model - srcSlices[i].DBObjectID = nil // clear object id - } - - var bucket dbBucket - err = tx.Where("name = ?", dstBucket). - Take(&bucket). - Error - if err != nil { - return fmt.Errorf("failed to fetch dst bucket: %w", err) - } - - dstObj := srcObj - dstObj.Model = Model{} // clear model - dstObj.DBBucket = bucket // set dst bucket - dstObj.ObjectID = dstPath // set dst path - dstObj.DBBucketID = bucket.ID // set dst bucket id - dstObj.Slabs = srcSlices // set slices - if mimeType != "" { - dstObj.MimeType = mimeType // override mime type - } - if err := tx.Create(&dstObj).Error; err != nil { - return fmt.Errorf("failed to create copy of object: %w", err) - } - - if err := s.createUserMetadata(tx, dstObj.ID, metadata); err != nil { - return fmt.Errorf("failed to create object metadata: %w", err) - } - - om = newObjectMetadata( - dstObj.ObjectID, - dstObj.Etag, - dstObj.MimeType, - dstObj.Health, - dstObj.CreatedAt, - dstObj.Size, - ) - return nil + om, err = tx.CopyObject(ctx, srcBucket, dstBucket, srcPath, dstPath, mimeType, metadata) + return err }) return } -func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) { - var deletedSectors int - err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - // Fetch contract_sectors to delete. 
- var sectors []dbContractSector - err := tx.Raw(` - SELECT contract_sectors.* - FROM contract_sectors - INNER JOIN sectors s ON s.id = contract_sectors.db_sector_id - INNER JOIN contracts c ON c.id = contract_sectors.db_contract_id - INNER JOIN hosts h ON h.id = c.host_id - WHERE s.root = ? AND h.public_key = ? - `, root[:], publicKey(hk)). - Scan(§ors). - Error - if err != nil { - return fmt.Errorf("failed to fetch contract sectors for deletion: %w", err) - } - - if len(sectors) > 0 { - // Update the affected slabs. - var sectorIDs []uint - uniqueIDs := make(map[uint]struct{}) - for _, s := range sectors { - if _, exists := uniqueIDs[s.DBSectorID]; !exists { - uniqueIDs[s.DBSectorID] = struct{}{} - sectorIDs = append(sectorIDs, s.DBSectorID) - } - } - err = tx.Exec("UPDATE slabs SET health_valid_until = ? WHERE id IN (SELECT db_slab_id FROM sectors WHERE id IN (?))", time.Now().Unix(), sectorIDs).Error - if err != nil { - return fmt.Errorf("failed to invalidate slab health: %w", err) - } +func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (deletedSectors int, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + deletedSectors, err = tx.DeleteHostSector(ctx, hk, root) + return err + }) + return +} - // Delete contract_sectors. - res := tx.Delete(§ors) - if err := res.Error; err != nil { - return fmt.Errorf("failed to delete contract sectors: %w", err) - } else if res.RowsAffected != int64(len(sectors)) { - return fmt.Errorf("expected %v affected rows but got %v", len(sectors), res.RowsAffected) - } - deletedSectors = len(sectors) +func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { + if !strings.HasPrefix(dirPath, "/") { + return 0, fmt.Errorf("path must start with /") + } else if !strings.HasSuffix(dirPath, "/") { + return 0, fmt.Errorf("path must end with /") + } - // Increment the host's lostSectors by the number of lost sectors. 
- if err := tx.Exec("UPDATE hosts SET lost_sectors = lost_sectors + ? WHERE public_key = ?", len(sectors), publicKey(hk)).Error; err != nil { - return fmt.Errorf("failed to increment lost sectors: %w", err) - } - } + if dirPath == "/" { + return 1, nil // root dir returned + } - // Fetch the sector and update the latest_host field if the host for - // which we remove the sector is the latest_host. - var sector dbSector - err = tx.Where("root", root[:]). - Preload("Contracts.Host"). - Find(§or). - Error - if err != nil { - return fmt.Errorf("failed to fetch sectors: %w", err) - } - if sector.LatestHost == publicKey(hk) { - if len(sector.Contracts) == 0 { - sector.LatestHost = publicKey{} // no more hosts - } else { - sector.LatestHost = sector.Contracts[len(sector.Contracts)-1].Host.PublicKey // most recent contract - } - return tx.Save(sector).Error - } - return nil - }) - return deletedSectors, err + var dir dbDirectory + if err := tx.Where("name", dirPath). + Select("id"). + Take(&dir). + Error; err != nil { + return 0, fmt.Errorf("failed to fetch directory: %w", err) + } + return dir.ID, nil } func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { @@ -1739,11 +1261,9 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } } - // collect all used contracts - usedContracts := o.Contracts() - // UpdateObject is ACID. - return s.retryTransaction(ctx, func(tx *gorm.DB) error { + var prune bool + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { // Try to delete. We want to get rid of the object and its slices if it // exists. 
// @@ -1754,94 +1274,80 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // NOTE: the metadata is not deleted because this delete will cascade, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it - _, err := s.deleteObject(tx, bucket, path) + var err error + prune, err = tx.DeleteObject(ctx, bucket, path) if err != nil { return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } - // Insert a new object. - objKey, err := o.Key.MarshalBinary() + // create the dir + dirID, err := tx.MakeDirsForPath(ctx, path) if err != nil { - return fmt.Errorf("failed to marshal object key: %w", err) - } - // fetch bucket id - var bucketID uint - err = s.db.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). - Take(&bucketID).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) - } else if err != nil { - return fmt.Errorf("failed to fetch bucket id: %w", err) + return fmt.Errorf("failed to create directories for path '%s': %w", path, err) } - obj := dbObject{ - DBBucketID: bucketID, - ObjectID: path, - Key: objKey, - Size: o.TotalSize(), - MimeType: mimeType, - Etag: eTag, - } - err = tx.Create(&obj).Error - if err != nil { - return fmt.Errorf("failed to create object: %w", err) - } - - // Fetch contract set. - var cs dbContractSet - if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { - return fmt.Errorf("contract set %v not found: %w", contractSet, err) - } - - // Fetch the used contracts. - contracts, err := fetchUsedContracts(tx, usedContracts) + // Insert a new object. + err = tx.InsertObject(ctx, bucket, path, contractSet, dirID, o, mimeType, eTag, metadata) if err != nil { - return fmt.Errorf("failed to fetch used contracts: %w", err) - } - - // Create all slices. This also creates any missing slabs or sectors. 
- if err := s.createSlices(tx, &obj.ID, nil, cs.ID, contracts, o.Slabs); err != nil { - return fmt.Errorf("failed to create slices: %w", err) - } - - // Create all user metadata. - if err := s.createUserMetadata(tx, obj.ID, metadata); err != nil { - return fmt.Errorf("failed to create user metadata: %w", err) + return fmt.Errorf("failed to insert object: %w", err) } - return nil }) + if err != nil { + return err + } else if prune { + // trigger pruning if we deleted an object + s.triggerSlabPruning() + } + return nil } func (s *SQLStore) RemoveObject(ctx context.Context, bucket, path string) error { - var rowsAffected int64 - var err error - err = s.retryTransaction(ctx, func(tx *gorm.DB) error { - rowsAffected, err = s.deleteObject(tx, bucket, path) - if err != nil { - return fmt.Errorf("RemoveObject: failed to delete object: %w", err) - } - return nil + var prune bool + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + prune, err = tx.DeleteObject(ctx, bucket, path) + return }) if err != nil { - return err - } - if rowsAffected == 0 { + return fmt.Errorf("RemoveObject: failed to delete object: %w", err) + } else if !prune { return fmt.Errorf("%w: key: %s", api.ErrObjectNotFound, path) } + s.triggerSlabPruning() return nil } func (s *SQLStore) RemoveObjects(ctx context.Context, bucket, prefix string) error { - var rowsAffected int64 - var err error - rowsAffected, err = s.deleteObjects(ctx, bucket, prefix) - if err != nil { - return err + var prune bool + batchSizeIdx := 0 + for { + start := time.Now() + var done bool + var duration time.Duration + if err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + deleted, err := tx.DeleteObjects(ctx, bucket, prefix, objectDeleteBatchSizes[batchSizeIdx]) + if err != nil { + return err + } + prune = prune || deleted + done = !deleted + return nil + }); err != nil { + return fmt.Errorf("failed to delete objects: %w", err) + } else if done { + break // nothing more to delete + } + duration = 
time.Since(start) + + // increase the batch size if deletion was faster than the threshold + if duration < batchDurationThreshold && batchSizeIdx < len(objectDeleteBatchSizes)-1 { + batchSizeIdx++ + } } - if rowsAffected == 0 { + if !prune { return fmt.Errorf("%w: prefix: %s", api.ErrObjectNotFound, prefix) } + s.triggerSlabPruning() return nil } @@ -1875,112 +1381,9 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s } } - // extract the slab key - key, err := s.Key.MarshalBinary() - if err != nil { - return err - } - - // collect all used contracts - usedContracts := s.Contracts() - // Update slab. - return ss.retryTransaction(ctx, func(tx *gorm.DB) (err error) { - // update slab - if err := tx.Model(&dbSlab{}). - Where("key", key). - Updates(map[string]interface{}{ - "db_contract_set_id": gorm.Expr("(SELECT id FROM contract_sets WHERE name = ?)", contractSet), - "health_valid_until": time.Now().Unix(), - "health": 1, - }). - Error; err != nil { - return err - } - - // find all used contracts - contracts, err := fetchUsedContracts(tx, usedContracts) - if err != nil { - return err - } - - // find existing slab - var slab dbSlab - if err = tx. - Where(&dbSlab{Key: key}). - Preload("Shards"). - Take(&slab). - Error; err == gorm.ErrRecordNotFound { - return fmt.Errorf("slab with key '%s' not found: %w", string(key), err) - } else if err != nil { - return err - } - - // make sure the number of shards doesn't change. - // NOTE: check both the slice as well as the TotalShards field to be - // safe. - if len(s.Shards) != int(slab.TotalShards) { - return fmt.Errorf("%w: expected %v shards (TotalShards) but got %v", errInvalidNumberOfShards, slab.TotalShards, len(s.Shards)) - } else if len(s.Shards) != len(slab.Shards) { - return fmt.Errorf("%w: expected %v shards (Shards) but got %v", errInvalidNumberOfShards, len(slab.Shards), len(s.Shards)) - } - - // make sure the roots stay the same. 
- for i, shard := range s.Shards { - if shard.Root != types.Hash256(slab.Shards[i].Root) { - return fmt.Errorf("%w: shard %v has changed root from %v to %v", errShardRootChanged, i, slab.Shards[i].Root, shard.Root[:]) - } - } - - // prepare sectors to update - sectors := make([]dbSector, len(s.Shards)) - for i := range s.Shards { - sectors[i] = dbSector{ - DBSlabID: slab.ID, - SlabIndex: i + 1, - LatestHost: publicKey(s.Shards[i].LatestHost), - Root: s.Shards[i].Root[:], - } - } - - // ensure the sectors exists - sectorIDs, err := upsertSectors(tx, sectors) - if err != nil { - return fmt.Errorf("failed to create sector: %w", err) - } - - // build contract <-> sector links - var contractSectors []dbContractSector - for i, shard := range s.Shards { - sectorID := sectorIDs[i] - - // ensure the associations are updated - for _, fcids := range shard.Contracts { - for _, fcid := range fcids { - if _, ok := contracts[fcid]; ok { - contractSectors = append(contractSectors, dbContractSector{ - DBSectorID: sectorID, - DBContractID: contracts[fcid].ID, - }) - } - } - } - } - - // if there are no associations we are done - if len(contractSectors) == 0 { - return nil - } - - // create associations - if err := tx.Table("contract_sectors"). - Clauses(clause.OnConflict{ - DoNothing: true, - }). - Create(&contractSectors).Error; err != nil { - return err - } - return nil + return ss.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateSlab(ctx, s, contractSet, s.Contracts()) }) } @@ -1993,59 +1396,18 @@ func (s *SQLStore) RefreshHealth(ctx context.Context) error { return nil // nothing to do } - // Update slab health in batches. 
- now := time.Now() - - // build health query - healthQuery := s.db.Raw(` -SELECT slabs.id, slabs.db_contract_set_id, CASE WHEN (slabs.min_shards = slabs.total_shards) -THEN - CASE WHEN (COUNT(DISTINCT(CASE WHEN cs.name IS NULL THEN NULL ELSE c.host_id END)) < slabs.min_shards) - THEN -1 - ELSE 1 - END -ELSE (CAST(COUNT(DISTINCT(CASE WHEN cs.name IS NULL THEN NULL ELSE c.host_id END)) AS FLOAT) - CAST(slabs.min_shards AS FLOAT)) / Cast(slabs.total_shards - slabs.min_shards AS FLOAT) -END AS health -FROM slabs -INNER JOIN sectors s ON s.db_slab_id = slabs.id -LEFT JOIN contract_sectors se ON s.id = se.db_sector_id -LEFT JOIN contracts c ON se.db_contract_id = c.id -LEFT JOIN contract_set_contracts csc ON csc.db_contract_id = c.id AND csc.db_contract_set_id = slabs.db_contract_set_id -LEFT JOIN contract_sets cs ON cs.id = csc.db_contract_set_id -WHERE slabs.health_valid_until <= ? -GROUP BY slabs.id -LIMIT ? -`, now.Unix(), refreshHealthBatchSize) - for { + // update slabs var rowsAffected int64 - err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - var res *gorm.DB - if isSQLite(s.db) { - res = tx.Exec("UPDATE slabs SET health = inner.health, health_valid_until = (?) FROM (?) AS inner WHERE slabs.id=inner.id", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity), healthQuery) - } else { - res = tx.Exec("UPDATE slabs sla INNER JOIN (?) h ON sla.id = h.id SET sla.health = h.health, health_valid_until = (?)", healthQuery, sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity)) - } - if res.Error != nil { - return res.Error - } - rowsAffected = res.RowsAffected - - // Update the health of objects with outdated health. 
- return tx.Exec(` -UPDATE objects SET health = ( - SELECT MIN(slabs.health) - FROM slabs - INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id -) WHERE health != ( - SELECT MIN(slabs.health) - FROM slabs - INNER JOIN slices ON slices.db_slab_id = slabs.id AND slices.db_object_id = objects.id -)`).Error + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + rowsAffected, err = tx.UpdateSlabHealth(ctx, refreshHealthBatchSize, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity) + return }) if err != nil { - return err - } else if rowsAffected < refreshHealthBatchSize { + return fmt.Errorf("failed to update slab health: %w", err) + } + // check if done + if rowsAffected < refreshHealthBatchSize { return nil // done } select { @@ -2096,183 +1458,14 @@ func (s *SQLStore) UnhealthySlabs(ctx context.Context, healthCutoff float64, set return slabs, nil } -func (s *SQLStore) createUserMetadata(tx *gorm.DB, objID uint, metadata api.ObjectUserMetadata) error { - entities := make([]*dbObjectUserMetadata, 0, len(metadata)) - for k, v := range metadata { - metadata := &dbObjectUserMetadata{ - DBObjectID: &objID, - Key: k, - Value: v, - } - entities = append(entities, metadata) - } - return tx.CreateInBatches(&entities, 1000).Error -} - -func (s *SQLStore) createMultipartMetadata(tx *gorm.DB, multipartUploadID uint, metadata api.ObjectUserMetadata) error { - entities := make([]*dbObjectUserMetadata, 0, len(metadata)) - for k, v := range metadata { - metadata := &dbObjectUserMetadata{ - DBMultipartUploadID: &multipartUploadID, - Key: k, - Value: v, - } - entities = append(entities, metadata) - } - return tx.CreateInBatches(&entities, 1000).Error -} - -func (s *SQLStore) updateUserMetadata(tx *gorm.DB, objID uint, metadata api.ObjectUserMetadata) error { - // delete all existing metadata - err := tx. - Where("db_object_id = ?", objID). - Delete(&dbObjectUserMetadata{}). 
- Error - if err != nil { - return err - } - - return s.createUserMetadata(tx, objID, metadata) -} - -func (s *SQLStore) createSlices(tx *gorm.DB, objID, multiPartID *uint, contractSetID uint, contracts map[types.FileContractID]dbContract, slices []object.SlabSlice) error { - if (objID == nil && multiPartID == nil) || (objID != nil && multiPartID != nil) { - return fmt.Errorf("either objID or multiPartID must be set") - } else if len(slices) == 0 { - return nil // nothing to do - } - - // build slabs - slabs := make([]dbSlab, len(slices)) - for i := range slices { - slabKey, err := slices[i].Key.MarshalBinary() - if err != nil { - return fmt.Errorf("failed to marshal slab key: %w", err) - } - slabs[i] = dbSlab{ - Key: slabKey, - DBContractSetID: contractSetID, - MinShards: slices[i].MinShards, - TotalShards: uint8(len(slices[i].Shards)), - } - } - - // create slabs that don't exist yet - err := tx. - Clauses(clause.OnConflict{ - DoNothing: true, - Columns: []clause.Column{{Name: "key"}}, - }). - Create(&slabs).Error - if err != nil { - return fmt.Errorf("failed to create slabs %w", err) - } - - // fetch the upserted slabs - for i := range slabs { - if err := tx.Raw("SELECT * FROM slabs WHERE `key` = ?", slabs[i].Key). - Scan(&slabs[i]). 
- Error; err != nil { - return fmt.Errorf("failed to fetch slab: %w", err) - } else if slabs[i].DBContractSetID != contractSetID { - return fmt.Errorf("slab already exists in another contract set %v != %v", slabs[i].DBContractSetID, contractSetID) - } - } - - // build slices - dbSlices := make([]dbSlice, len(slices)) - for i := range slices { - slab := slabs[i] - dbSlices[i] = dbSlice{ - DBSlabID: slab.ID, - DBObjectID: objID, - ObjectIndex: uint(i + 1), - DBMultipartPartID: multiPartID, - Offset: slices[i].Offset, - Length: slices[i].Length, - } - } - - // if there are no slices we are done - if len(dbSlices) == 0 { - return nil - } - - // create slices - err = tx.Create(&dbSlices).Error - if err != nil { - return fmt.Errorf("failed to create slice %w", err) - } - - // build sectors - var sectors []dbSector - for i, ss := range slices { - slab := slabs[i] - for j := range ss.Shards { - sectors = append(sectors, dbSector{ - DBSlabID: slab.ID, - SlabIndex: j + 1, - LatestHost: publicKey(ss.Shards[j].LatestHost), - Root: ss.Shards[j].Root[:], - }) - } - } - - // create sector that don't exist yet - sectorIDs, err := upsertSectors(tx, sectors) - if err != nil { - return fmt.Errorf("failed to create sectors: %w", err) - } - - // build contract <-> sector links - sectorIdx := 0 - var contractSectors []dbContractSector - for _, ss := range slices { - for _, shard := range ss.Shards { - sectorID := sectorIDs[sectorIdx] - for _, fcids := range shard.Contracts { - for _, fcid := range fcids { - if _, ok := contracts[fcid]; ok { - contractSectors = append(contractSectors, dbContractSector{ - DBSectorID: sectorID, - DBContractID: contracts[fcid].ID, - }) - } else { - s.logger.Warn("missing contract for shard", - "contract", fcid, - "root", shard.Root, - "latest_host", shard.LatestHost, - ) - } - } - } - sectorIdx++ - } - } - - // if there are no associations we are done - if len(contractSectors) == 0 { - return nil - } - - // create associations - if err := tx. 
- Table("contract_sectors"). - Clauses(clause.OnConflict{ - DoNothing: true, - }). - CreateInBatches(&contractSectors, sectorInsertionBatchSize).Error; err != nil { - return err - } - return nil -} - // object retrieves an object from the store. func (s *SQLStore) object(tx *gorm.DB, bucket, path string) (api.Object, error) { // fetch raw object data raw, err := s.objectRaw(tx, bucket, path) - if errors.Is(err, gorm.ErrRecordNotFound) || len(raw) == 0 { + if errors.Is(err, gorm.ErrRecordNotFound) || (err == nil && len(raw) == 0) { return api.Object{}, api.ErrObjectNotFound + } else if err != nil { + return api.Object{}, err } // hydrate raw object data @@ -2476,7 +1669,7 @@ func (s *SQLStore) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey err = s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Raw(` -SELECT DISTINCT obj.object_id as Name, obj.size as Size, obj.mime_type as MimeType, sla.health as Health +SELECT DISTINCT obj.object_id as ObjectName, obj.size as Size, obj.mime_type as MimeType, sla.health as Health FROM slabs sla INNER JOIN slices sli ON sli.db_slab_id = sla.id INNER JOIN objects obj ON sli.db_object_id = obj.id @@ -2554,14 +1747,12 @@ func (s *SQLStore) markPackedSlabUploaded(tx *gorm.DB, slab api.UploadedPackedSl } // delete buffer - var buffer dbBufferedSlab - if err := tx.Take(&buffer, "id = ?", slab.BufferID).Error; err != nil { + var fileName string + if err := tx.Raw("SELECT filename FROM buffered_slabs WHERE id = ?", slab.BufferID). + Scan(&fileName).Error; err != nil { return "", err } - fileName := buffer.Filename - err = tx.Delete(&buffer). - Error - if err != nil { + if err := tx.Exec("DELETE FROM buffered_slabs WHERE id = ?", slab.BufferID).Error; err != nil { return "", err } @@ -2603,23 +1794,6 @@ func contract(tx *gorm.DB, id fileContractID) (contract dbContract, err error) { return } -// contracts retrieves all contracts for the given ids from the store. 
-func contracts(tx *gorm.DB, ids []types.FileContractID) (dbContracts []dbContract, err error) { - fcids := make([]fileContractID, len(ids)) - for i, fcid := range ids { - fcids[i] = fileContractID(fcid) - } - - // fetch contracts - err = tx. - Model(&dbContract{}). - Where("fcid IN (?)", fcids). - Joins("Host"). - Find(&dbContracts). - Error - return -} - // contractsForHost retrieves all contracts for the given host func contractsForHost(tx *gorm.DB, host dbHost) (contracts []dbContract, err error) { err = tx. @@ -2682,48 +1856,6 @@ func addContract(tx *gorm.DB, c rhpv2.ContractRevision, contractPrice, totalCost return contract, nil } -// archiveContracts archives the given contracts and uses the given reason as -// archival reason -// -// NOTE: this function archives the contracts without setting a renewed ID -func archiveContracts(tx *gorm.DB, contracts []dbContract, toArchive map[types.FileContractID]string) error { - var toInvalidate []fileContractID - for _, contract := range contracts { - toInvalidate = append(toInvalidate, contract.FCID) - } - // Invalidate the health on the slabs before deleting the contracts to avoid - // breaking the relations beforehand. 
- if err := invalidateSlabHealthByFCID(tx, toInvalidate); err != nil { - return fmt.Errorf("invalidating slab health failed: %w", err) - } - for _, contract := range contracts { - // sanity check the host is populated - if contract.Host.ID == 0 { - return fmt.Errorf("host not populated for contract %v", contract.FCID) - } - - // create a copy in the archive - if err := tx.Create(&dbArchivedContract{ - Host: publicKey(contract.Host.PublicKey), - Reason: toArchive[types.FileContractID(contract.FCID)], - - ContractCommon: contract.ContractCommon, - }).Error; err != nil { - return err - } - - // remove the contract - res := tx.Delete(&contract) - if err := res.Error; err != nil { - return err - } - if res.RowsAffected != 1 { - return fmt.Errorf("expected to delete 1 row, deleted %d", res.RowsAffected) - } - } - return nil -} - func (s *SQLStore) pruneSlabsLoop() { for { select { @@ -2732,40 +1864,67 @@ func (s *SQLStore) pruneSlabsLoop() { return } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second+sumDurations(s.retryTransactionIntervals)) - err := s.retryTransaction(ctx, pruneSlabs) + // prune slabs + pruneSuccess := true + for { + var deleted int64 + err := s.bMain.Transaction(s.shutdownCtx, func(dt sql.DatabaseTx) error { + var err error + deleted, err = dt.PruneSlabs(s.shutdownCtx, slabPruningBatchSize) + return err + }) + if err != nil { + s.logger.Errorw("slab pruning failed", zap.Error(err)) + s.alerts.RegisterAlert(s.shutdownCtx, alerts.Alert{ + ID: pruneSlabsAlertID, + Severity: alerts.SeverityWarning, + Message: "Failed to prune slabs", + Timestamp: time.Now(), + Data: map[string]interface{}{ + "error": err.Error(), + "hint": "This might happen when your database is under a lot of load due to deleting objects rapidly. 
This alert will disappear the next time slabs are pruned successfully.", + }, + }) + pruneSuccess = false + } else { + s.alerts.DismissAlerts(s.shutdownCtx, pruneSlabsAlertID) + } + + if deleted < slabPruningBatchSize { + break // done + } + } + + // prune dirs + err := s.bMain.Transaction(s.shutdownCtx, func(dt sql.DatabaseTx) error { + return dt.PruneEmptydirs(s.shutdownCtx) + }) if err != nil { - s.logger.Errorw("failed to prune slabs", zap.Error(err)) + s.logger.Errorw("dir pruning failed", zap.Error(err)) s.alerts.RegisterAlert(s.shutdownCtx, alerts.Alert{ - ID: pruneSlabsAlertID, + ID: pruneDirsAlertID, Severity: alerts.SeverityWarning, - Message: "Failed to prune slabs from database", + Message: "Failed to prune dirs", Timestamp: time.Now(), Data: map[string]interface{}{ "error": err.Error(), "hint": "This might happen when your database is under a lot of load due to deleting objects rapidly. This alert will disappear the next time slabs are pruned successfully.", }, }) + pruneSuccess = false } else { - s.alerts.DismissAlerts(s.shutdownCtx, pruneSlabsAlertID) + s.alerts.DismissAlerts(s.shutdownCtx, pruneDirsAlertID) + } + // mark the last prune time where both slabs and dirs were pruned + if pruneSuccess { s.mu.Lock() s.lastPrunedAt = time.Now() s.mu.Unlock() } - cancel() } } -func pruneSlabs(tx *gorm.DB) error { - return tx.Exec(` -DELETE -FROM slabs -WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) -AND slabs.db_buffered_slab_id IS NULL -`).Error -} - func (s *SQLStore) triggerSlabPruning() { select { case s.slabPruneSigChan <- struct{}{}: @@ -2773,201 +1932,31 @@ func (s *SQLStore) triggerSlabPruning() { } } -// deleteObject deletes an object from the store and prunes all slabs which are -// without an obect after the deletion. That means in case of packed uploads, -// the slab is only deleted when no more objects point to it. 
-func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, error) { - // check if the object exists first to avoid unnecessary locking for the - // common case - var objID uint - resp := tx.Model(&dbObject{}). - Where("object_id = ? AND ?", path, sqlWhereBucket("objects", bucket)). - Select("id"). - Limit(1). - Scan(&objID) - if err := resp.Error; err != nil { - return 0, err - } else if resp.RowsAffected == 0 { - return 0, nil - } - - tx = tx.Where("id", objID). - Delete(&dbObject{}) - if tx.Error != nil { - return 0, tx.Error - } - numDeleted := tx.RowsAffected - if numDeleted == 0 { - return 0, nil // nothing to prune if no object was deleted - } - s.triggerSlabPruning() - return numDeleted, nil -} - -// deleteObjects deletes a batch of objects from the database. The order of -// deletion goes from largest to smallest. That's because the batch size is -// dynamically increased and the smaller objects get the faster we can delete -// them meaning it makes sense to increase the batch size over time. -func (s *SQLStore) deleteObjects(ctx context.Context, bucket string, path string) (numDeleted int64, _ error) { - batchSizeIdx := 0 +func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []types.FileContractID) error { for { - var duration time.Duration - var rowsAffected int64 - if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - start := time.Now() - res := tx.Exec(` - DELETE FROM objects - WHERE id IN ( - SELECT id FROM ( - SELECT id FROM objects - WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND ? - ORDER BY size DESC - LIMIT ? 
- ) tmp - )`, - path+"%", utf8.RuneCountInString(path), path, sqlWhereBucket("objects", bucket), - objectDeleteBatchSizes[batchSizeIdx]) - if err := res.Error; err != nil { - return res.Error - } - // prune slabs if we deleted an object - rowsAffected = res.RowsAffected - if rowsAffected > 0 { - s.triggerSlabPruning() - } - duration = time.Since(start) - return nil - }); err != nil { - return 0, fmt.Errorf("failed to delete objects: %w", err) - } - - // if nothing got deleted we are done - if rowsAffected == 0 { - break - } - numDeleted += rowsAffected - - // increase the batch size if deletion was faster than the threshold - if duration < batchDurationThreshold && batchSizeIdx < len(objectDeleteBatchSizes)-1 { - batchSizeIdx++ - } - } - return numDeleted, nil -} - -func invalidateSlabHealthByFCID(tx *gorm.DB, fcids []fileContractID) error { - if len(fcids) == 0 { - return nil - } - - for { - now := time.Now().Unix() - if resp := tx.Exec(` - UPDATE slabs SET health_valid_until = ? WHERE id in ( - SELECT * - FROM ( - SELECT slabs.id - FROM slabs - INNER JOIN sectors se ON se.db_slab_id = slabs.id - INNER JOIN contract_sectors cs ON cs.db_sector_id = se.id - INNER JOIN contracts c ON c.id = cs.db_contract_id - WHERE c.fcid IN (?) AND slabs.health_valid_until >= ? - LIMIT ? 
- ) slab_ids - )`, now, fcids, now, refreshHealthBatchSize); resp.Error != nil { - return fmt.Errorf("failed to invalidate slab health: %w", resp.Error) - } else if resp.RowsAffected < refreshHealthBatchSize { - break // done + var affected int64 + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + affected, err = tx.InvalidateSlabHealthByFCID(ctx, fcids, refreshHealthBatchSize) + return + }) + if err != nil { + return fmt.Errorf("failed to invalidate slab health: %w", err) + } else if affected < refreshHealthBatchSize { + return nil // done } time.Sleep(time.Second) } - return nil -} - -func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []fileContractID) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - return invalidateSlabHealthByFCID(tx, fcids) - }) -} - -// nolint:unparam -func sqlConcat(db *gorm.DB, a, b string) string { - if isSQLite(db) { - return fmt.Sprintf("%s || %s", a, b) - } - return fmt.Sprintf("CONCAT(%s, %s)", a, b) -} - -func sqlRandomTimestamp(db *gorm.DB, now time.Time, minDuration, maxDuration time.Duration) clause.Expr { - if isSQLite(db) { - return gorm.Expr("ABS(RANDOM()) % (? - ?) + ?", int(maxDuration.Seconds()), int(minDuration.Seconds()), now.Add(minDuration).Unix()) - } - return gorm.Expr("FLOOR(? + RAND() * (? - ?))", now.Add(minDuration).Unix(), int(maxDuration.Seconds()), int(minDuration.Seconds())) -} - -// nolint:unparam -func sqlWhereBucket(objTable string, bucket string) clause.Expr { - return gorm.Expr(fmt.Sprintf("%s.db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", objTable), bucket) } // TODO: we can use ObjectEntries instead of ListObject if we want to use '/' as // a delimiter for now (see backend.go) but it would be interesting to have // arbitrary 'delim' support in ListObjects. 
-func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { - // fetch one more to see if there are more entries - if limit <= -1 { - limit = math.MaxInt - } else { - limit++ - } - - // build prefix expr - prefixExpr := buildPrefixExpr(prefix) - - // build order clause - orderBy, err := buildOrderClause(sortBy, sortDir) - if err != nil { - return api.ObjectsListResponse{}, err - } - - // build marker expr - markerExpr, markerOrderBy, err := buildMarkerExpr(s.db, bucket, prefix, marker, sortBy, sortDir) - if err != nil { - return api.ObjectsListResponse{}, err - } - var rows []rawObjectMetadata - if err := s.db. - Select("o.object_id as Name, o.size as Size, o.health as Health, o.mime_type as MimeType, o.created_at as ModTime, o.etag as ETag"). - Model(&dbObject{}). - Table("objects o"). - Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). - Where("b.name = ? AND ? AND ?", bucket, prefixExpr, markerExpr). - Order(orderBy). - Order(markerOrderBy). - Order("Name ASC"). - Limit(int(limit)). 
- Scan(&rows).Error; err != nil { - return api.ObjectsListResponse{}, err - } - - var hasMore bool - var nextMarker string - if len(rows) == limit { - hasMore = true - rows = rows[:len(rows)-1] - nextMarker = rows[len(rows)-1].Name - } - - var objects []api.ObjectMetadata - for _, row := range rows { - objects = append(objects, row.convert()) - } - - return api.ObjectsListResponse{ - HasMore: hasMore, - NextMarker: nextMarker, - Objects: objects, - }, nil +func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (resp api.ObjectsListResponse, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + resp, err = tx.ListObjects(ctx, bucket, prefix, sortBy, sortDir, marker, limit) + return err + }) + return } func (ss *SQLStore) processConsensusChangeContracts(cc modules.ConsensusChange) { @@ -3059,117 +2048,6 @@ func (ss *SQLStore) processConsensusChangeContracts(cc modules.ConsensusChange) } } -func buildMarkerExpr(db *gorm.DB, bucket, prefix, marker, sortBy, sortDir string) (markerExpr clause.Expr, orderBy clause.OrderBy, err error) { - // no marker - if marker == "" { - return exprTRUE, clause.OrderBy{}, nil - } - - // for markers to work we need to order by object_id - orderBy = clause.OrderBy{ - Columns: []clause.OrderByColumn{ - { - Column: clause.Column{Name: "object_id"}, - Desc: false, - }, - }, - } - - desc := strings.EqualFold(sortDir, api.ObjectSortDirDesc) - switch sortBy { - case "", api.ObjectSortByName: - if desc { - markerExpr = gorm.Expr("object_id < ?", marker) - } else { - markerExpr = gorm.Expr("object_id > ?", marker) - } - case api.ObjectSortByHealth: - // fetch marker health - var markerHealth float64 - if marker != "" && sortBy == api.ObjectSortByHealth { - if err := db. - Select("o.health"). - Model(&dbObject{}). - Table("objects o"). - Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). - Where("b.name = ? AND ? 
AND ?", bucket, buildPrefixExpr(prefix), gorm.Expr("o.object_id >= ?", marker)). - Limit(1). - Scan(&markerHealth). - Error; err != nil { - return exprTRUE, clause.OrderBy{}, err - } - } - - if desc { - markerExpr = gorm.Expr("(Health <= ? AND object_id > ?) OR Health < ?", markerHealth, marker, markerHealth) - } else { - markerExpr = gorm.Expr("Health > ? OR (Health >= ? AND object_id > ?)", markerHealth, markerHealth, marker) - } - case api.ObjectSortBySize: - // fetch marker size - var markerSize float64 - if marker != "" && sortBy == api.ObjectSortBySize { - if err := db. - Select("o.size"). - Model(&dbObject{}). - Table("objects o"). - Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). - Where("b.name = ? AND ? AND ?", bucket, buildPrefixExpr(prefix), gorm.Expr("o.object_id >= ?", marker)). - Limit(1). - Scan(&markerSize). - Error; err != nil { - return exprTRUE, clause.OrderBy{}, err - } - } - - if desc { - markerExpr = gorm.Expr("(Size <= ? AND object_id > ?) OR Size < ?", markerSize, marker, markerSize) - } else { - markerExpr = gorm.Expr("Size > ? OR (Size >= ? AND object_id > ?)", markerSize, markerSize, marker) - } - default: - err = fmt.Errorf("unhandled sortBy parameter '%s'", sortBy) - } - return -} - -func buildOrderClause(sortBy, sortDir string) (clause.OrderByColumn, error) { - if err := validateSort(sortBy, sortDir); err != nil { - return clause.OrderByColumn{}, err - } - - orderByColumns := map[string]string{ - "": "object_id", - api.ObjectSortByName: "object_id", - api.ObjectSortByHealth: "Health", - api.ObjectSortBySize: "Size", - } - - return clause.OrderByColumn{ - Column: clause.Column{Name: orderByColumns[sortBy]}, - Desc: strings.EqualFold(sortDir, api.ObjectSortDirDesc), - }, nil -} - -func buildPrefixExpr(prefix string) clause.Expr { - if prefix != "" { - return gorm.Expr("o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) 
= ?", prefix+"%", utf8.RuneCountInString(prefix), prefix) - } else { - return exprTRUE - } -} - -func updateAllObjectsHealth(tx *gorm.DB) error { - return tx.Exec(` -UPDATE objects -SET health = ( - SELECT COALESCE(MIN(slabs.health), 1) - FROM slabs - INNER JOIN slices sli ON sli.db_slab_id = slabs.id - WHERE sli.db_object_id = objects.id) -`).Error -} - func validateSort(sortBy, sortDir string) error { allowed := func(s string, allowed ...string) bool { for _, a := range allowed { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 6233b4941..7462f6187 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -18,8 +18,11 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/config" + isql "go.sia.tech/renterd/internal/sql" "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/object" + sql "go.sia.tech/renterd/stores/sql" "gorm.io/gorm" "gorm.io/gorm/schema" "lukechampine.com/frand" @@ -41,6 +44,22 @@ func (s *SQLStore) RemoveObjectsBlocking(ctx context.Context, bucket, prefix str return s.waitForPruneLoop(ts) } +func (s *SQLStore) RenameObjectBlocking(ctx context.Context, bucket, keyOld, keyNew string, force bool) error { + ts := time.Now() + if err := s.RenameObject(ctx, bucket, keyOld, keyNew, force); err != nil { + return err + } + return s.waitForPruneLoop(ts) +} + +func (s *SQLStore) RenameObjectsBlocking(ctx context.Context, bucket, prefixOld, prefixNew string, force bool) error { + ts := time.Now() + if err := s.RenameObjects(ctx, bucket, prefixOld, prefixNew, force); err != nil { + return err + } + return s.waitForPruneLoop(ts) +} + func (s *SQLStore) UpdateObjectBlocking(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { var ts time.Time _, err := s.Object(ctx, bucket, path) @@ -76,6 +95,17 @@ func randomMultisigUC() types.UnlockConditions { return uc } +func 
updateAllObjectsHealth(tx *gorm.DB) error { + return tx.Exec(` +UPDATE objects +SET health = ( + SELECT COALESCE(MIN(slabs.health), 1) + FROM slabs + INNER JOIN slices sli ON sli.db_slab_id = slabs.id + WHERE sli.db_object_id = objects.id) +`).Error +} + // TestObjectBasic tests the hydration of raw objects works when we fetch // objects from the metadata store. func TestObjectBasic(t *testing.T) { @@ -660,7 +690,7 @@ func TestRenewedContract(t *testing.T) { // Assert we can't fetch the renewed contract. _, err = ss.RenewedContract(context.Background(), fcid1) if !errors.Is(err, api.ErrContractNotFound) { - t.Fatal("unexpected") + t.Fatal("unexpected", err) } // Renew it. @@ -844,7 +874,7 @@ func TestAncestorsContracts(t *testing.T) { t.Fatal("wrong number of contracts returned", len(contracts)) } for i := 0; i < len(contracts)-1; i++ { - if !reflect.DeepEqual(contracts[i], api.ArchivedContract{ + expected := api.ArchivedContract{ ID: fcids[len(fcids)-2-i], HostKey: hk, RenewedTo: fcids[len(fcids)-1-i], @@ -853,7 +883,9 @@ func TestAncestorsContracts(t *testing.T) { State: api.ContractStatePending, WindowStart: 400, WindowEnd: 500, - }) { + } + if !reflect.DeepEqual(contracts[i], expected) { + t.Log(cmp.Diff(contracts[i], expected)) t.Fatal("wrong contract", i, contracts[i]) } } @@ -1047,11 +1079,12 @@ func TestSQLMetadataStore(t *testing.T) { one := uint(1) expectedObj := dbObject{ - DBBucketID: ss.DefaultBucketID(), - Health: 1, - ObjectID: objID, - Key: obj1Key, - Size: obj1.TotalSize(), + DBDirectoryID: 1, + DBBucketID: ss.DefaultBucketID(), + Health: 1, + ObjectID: objID, + Key: obj1Key, + Size: obj1.TotalSize(), Slabs: []dbSlice{ { DBObjectID: &one, @@ -1446,6 +1479,7 @@ func TestObjectEntries(t *testing.T) { // assertMetadata asserts both ModTime, MimeType and ETag and clears them so the // entries are ready for comparison assertMetadata := func(entries []api.ObjectMetadata) { + t.Helper() for i := range entries { // assert mod time if 
!strings.HasSuffix(entries[i].Name, "/") && entries[i].ModTime.IsZero() { @@ -1454,14 +1488,15 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - if entries[i].MimeType != testMimeType { + isDir := strings.HasSuffix(entries[i].Name, "/") + if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType != testMimeType) { t.Fatal("unexpected mime type", entries[i].MimeType) } entries[i].MimeType = "" // assert etag - if entries[i].ETag == "" { - t.Fatal("etag should be set") + if isDir != (entries[i].ETag == "") { + t.Fatal("etag should be set for files and empty for dirs") } entries[i].ETag = "" } @@ -1907,7 +1942,7 @@ func TestUnhealthySlabsNoContracts(t *testing.T) { // delete the sector - we manually invalidate the slabs for the contract // before deletion. - err = invalidateSlabHealthByFCID(ss.db, []fileContractID{fileContractID(fcid1)}) + err = ss.invalidateSlabHealthByFCID(context.Background(), []types.FileContractID{(fcid1)}) if err != nil { t.Fatal(err) } @@ -2467,6 +2502,43 @@ func TestRenameObjects(t *testing.T) { t.Fatal("unexpected path", obj.Name) } } + + // Assert directories are correct + expectedDirs := []struct { + id uint + parentID uint + name string + }{ + { + id: 1, + parentID: 0, + name: "/", + }, + { + id: 2, + parentID: 1, + name: "/fileÅ›/", + }, + } + var directories []dbDirectory + test.Retry(100, 100*time.Millisecond, func() error { + if err := ss.db.Find(&directories).Error; err != nil { + return err + } else if len(directories) != len(expectedDirs) { + return fmt.Errorf("unexpected number of directories, %v != %v", len(directories), len(expectedDirs)) + } + return nil + }) + + for i, dir := range directories { + if dir.ID != expectedDirs[i].id { + t.Fatalf("unexpected directory id, %v != %v", dir.ID, expectedDirs[i].id) + } else if dir.DBParentID != expectedDirs[i].parentID { + t.Fatalf("unexpected directory parent id, %v != %v", dir.DBParentID, expectedDirs[i].parentID) + } 
else if dir.Name != expectedDirs[i].name { + t.Fatalf("unexpected directory name, %v != %v", dir.Name, expectedDirs[i].name) + } + } } // TestObjectsStats is a unit test for ObjectsStats. @@ -2663,7 +2735,13 @@ func TestPartialSlab(t *testing.T) { t.Fatal("wrong data") } - var buffer dbBufferedSlab + type bufferedSlab struct { + ID uint + DBSlab dbSlab `gorm:"foreignKey:DBBufferedSlabID"` + Filename string + } + + var buffer bufferedSlab sk, _ := slabs[0].Key.MarshalBinary() if err := ss.db.Joins("DBSlab").Take(&buffer, "DBSlab.key = ?", secretKey(sk)).Error; err != nil { t.Fatal(err) @@ -2685,8 +2763,8 @@ func TestPartialSlab(t *testing.T) { Key: object.GenerateEncryptionKey(), MinShards: 1, Shards: []object.Sector{ - newTestShard(hk1, fcid1, types.Hash256{1}), - newTestShard(hk2, fcid2, types.Hash256{2}), + newTestShard(hk1, fcid1, frand.Entropy256()), + newTestShard(hk2, fcid2, frand.Entropy256()), }, }, Offset: 0, @@ -2725,7 +2803,7 @@ func TestPartialSlab(t *testing.T) { } else if !bytes.Equal(data, slab2Data) { t.Fatal("wrong data") } - buffer = dbBufferedSlab{} + buffer = bufferedSlab{} sk, _ = slabs[0].Key.MarshalBinary() if err := ss.db.Joins("DBSlab").Take(&buffer, "DBSlab.key = ?", secretKey(sk)).Error; err != nil { t.Fatal(err) @@ -2766,13 +2844,13 @@ func TestPartialSlab(t *testing.T) { } else if !bytes.Equal(slab3Data, append(data1, data2...)) { t.Fatal("wrong data") } - buffer = dbBufferedSlab{} + buffer = bufferedSlab{} sk, _ = slabs[0].Key.MarshalBinary() if err := ss.db.Joins("DBSlab").Take(&buffer, "DBSlab.key = ?", secretKey(sk)).Error; err != nil { t.Fatal(err) } assertBuffer(buffer1Name, rhpv2.SectorSize, true, false) - buffer = dbBufferedSlab{} + buffer = bufferedSlab{} sk, _ = slabs[1].Key.MarshalBinary() if err := ss.db.Joins("DBSlab").Take(&buffer, "DBSlab.key = ?", secretKey(sk)).Error; err != nil { t.Fatal(err) @@ -2801,11 +2879,11 @@ func TestPartialSlab(t *testing.T) { assertBuffer(buffer1Name, rhpv2.SectorSize, true, true) 
assertBuffer(buffer2Name, 1, false, false) - var foo []dbBufferedSlab + var foo []bufferedSlab if err := ss.db.Find(&foo).Error; err != nil { t.Fatal(err) } - buffer = dbBufferedSlab{} + buffer = bufferedSlab{} if err := ss.db.Take(&buffer, "id = ?", packedSlabs[0].BufferID).Error; err != nil { t.Fatal(err) } @@ -2824,7 +2902,7 @@ func TestPartialSlab(t *testing.T) { t.Fatal(err) } - buffer = dbBufferedSlab{} + buffer = bufferedSlab{} if err := ss.db.Take(&buffer, "id = ?", packedSlabs[0].BufferID).Error; !errors.Is(err, gorm.ErrRecordNotFound) { t.Fatal("shouldn't be able to find buffer", err) } @@ -3242,7 +3320,7 @@ func TestBucketObjects(t *testing.T) { } // Rename object foo/bar in bucket 1 to foo/baz but not in bucket 2. - if err := ss.RenameObject(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil { + if err := ss.RenameObjectBlocking(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil { t.Fatal(err) } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { t.Fatal(err) @@ -3259,7 +3337,7 @@ func TestBucketObjects(t *testing.T) { } // Rename foo/bar in bucket 2 using the batch rename. - if err := ss.RenameObjects(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil { + if err := ss.RenameObjectsBlocking(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil { t.Fatal(err) } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { t.Fatal(err) @@ -3676,6 +3754,29 @@ func TestDeleteHostSector(t *testing.T) { } else if hi.Interactions.LostSectors != 0 { t.Fatalf("expected 0 lost sector, got %v", hi.Interactions.LostSectors) } + + // Prune the sector from hk2. 
+ if n, err := ss.DeleteHostSector(context.Background(), hk2, root); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal("expected 2 sectors to be pruned, got", n) + } + + hi, err = ss.Host(context.Background(), hk2) + if err != nil { + t.Fatal(err) + } else if hi.Interactions.LostSectors != 2 { + t.Fatalf("expected 2 lost sectors, got %v", hi.Interactions.LostSectors) + } + + // Fetch the sector and check the public key has the default value + if err := ss.db.Model(&dbSector{}).Find(&sectors).Error; err != nil { + t.Fatal(err) + } else if len(sectors) != 1 { + t.Fatal("expected 1 sector", len(sectors)) + } else if sector := sectors[0]; sector.LatestHost != [32]byte{} { + t.Fatal("expected latest host to be empty", sector.LatestHost) + } } func newTestShards(hk types.PublicKey, fcid types.FileContractID, root types.Hash256) []object.Sector { return []object.Sector{ @@ -3739,7 +3840,7 @@ func TestUpdateSlabSanityChecks(t *testing.T) { if err := ss.UpdateSlab(context.Background(), object.Slab{ Key: slab.Key, Shards: shards[:len(shards)-1], - }, testContractSet); !errors.Is(err, errInvalidNumberOfShards) { + }, testContractSet); !errors.Is(err, isql.ErrInvalidNumberOfShards) { t.Fatal(err) } @@ -3753,7 +3854,7 @@ func TestUpdateSlabSanityChecks(t *testing.T) { Key: slab.Key, Shards: reversedShards, } - if err := ss.UpdateSlab(context.Background(), reversedSlab, testContractSet); !errors.Is(err, errShardRootChanged) { + if err := ss.UpdateSlab(context.Background(), reversedSlab, testContractSet); !errors.Is(err, isql.ErrShardRootChanged) { t.Fatal(err) } } @@ -3935,7 +4036,7 @@ func TestRefreshHealth(t *testing.T) { } // add test hosts - hks, err := ss.addTestHosts(2) + hks, err := ss.addTestHosts(8) if err != nil { t.Fatal(err) } @@ -3955,10 +4056,13 @@ func TestRefreshHealth(t *testing.T) { if added, err := ss.addTestObject(o1, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), + MinShards: 2, 
+ Key: object.GenerateEncryptionKey(), Shards: []object.Sector{ newTestShard(hks[0], fcids[0], types.Hash256{0}), newTestShard(hks[1], fcids[1], types.Hash256{1}), + newTestShard(hks[2], fcids[2], types.Hash256{2}), + newTestShard(hks[3], fcids[3], types.Hash256{3}), }, }}}, }); err != nil { @@ -3971,10 +4075,13 @@ func TestRefreshHealth(t *testing.T) { if added, err := ss.addTestObject(o2, object.Object{ Key: object.GenerateEncryptionKey(), Slabs: []object.SlabSlice{{Slab: object.Slab{ - Key: object.GenerateEncryptionKey(), + MinShards: 2, + Key: object.GenerateEncryptionKey(), Shards: []object.Sector{ - newTestShard(hks[0], fcids[0], types.Hash256{2}), - newTestShard(hks[1], fcids[1], types.Hash256{3}), + newTestShard(hks[4], fcids[4], types.Hash256{4}), + newTestShard(hks[5], fcids[5], types.Hash256{5}), + newTestShard(hks[6], fcids[6], types.Hash256{6}), + newTestShard(hks[7], fcids[7], types.Hash256{7}), }, }}}, }); err != nil { @@ -3983,8 +4090,8 @@ func TestRefreshHealth(t *testing.T) { t.Fatal("expected health to be 1, got", added.Health) } - // update contract set and refresh health, assert health is .5 - err = ss.SetContractSet(context.Background(), testContractSet, fcids[:1]) + // update contract set to not contain the first contract + err = ss.SetContractSet(context.Background(), testContractSet, fcids[1:]) if err != nil { t.Fatal(err) } @@ -3994,42 +4101,24 @@ func TestRefreshHealth(t *testing.T) { } if health(o1) != .5 { t.Fatal("expected health to be .5, got", health(o1)) - } else if health(o2) != .5 { - t.Fatal("expected health to be .5, got", health(o2)) - } - - // set the health of s1 to be lower than .5 - err = ss.overrideSlabHealth(o1, 0.4) - if err != nil { - t.Fatal(err) - } - - // refresh health and assert only object 1's health got updated - err = ss.RefreshHealth(context.Background()) - if err != nil { - t.Fatal(err) - } - if health(o1) != .4 { - t.Fatal("expected health to be .4, got", health(o1)) - } else if health(o2) != .5 { - 
t.Fatal("expected health to be .5, got", health(o2)) + } else if health(o2) != 1 { + t.Fatal("expected health to be 1, got", health(o2)) } - // set the health of s2 to be higher than .5 - err = ss.overrideSlabHealth(o2, 0.6) + // update contract set again to increase health of o1 again and lower health + // of o2 + err = ss.SetContractSet(context.Background(), testContractSet, fcids[:6]) if err != nil { t.Fatal(err) } - - // refresh health and assert only object 2's health got updated err = ss.RefreshHealth(context.Background()) if err != nil { t.Fatal(err) } - if health(o1) != .4 { + if health(o1) != 1 { - t.Fatal("expected health to be .4, got", health(o1)) + t.Fatal("expected health to be 1, got", health(o1)) - } else if health(o2) != .6 { - t.Fatal("expected health to be .6, got", health(o2)) + } else if health(o2) != 0 { + t.Fatal("expected health to be 0, got", health(o2)) } // add another object that is empty @@ -4042,22 +4131,11 @@ func TestRefreshHealth(t *testing.T) { t.Fatal("expected health to be 1, got", added.Health) } - // update its health to .1 - if err := ss.db. - Model(&dbObject{}). - Where("object_id", o3). - Update("health", 0.1). 
- Error; err != nil { - t.Fatal(err) - } else if health(o3) != .1 { - t.Fatalf("expected health to be .1, got %v", health(o3)) - } - - // a refresh should not update its health + // a refresh should keep the health at 1 if err := ss.RefreshHealth(context.Background()); err != nil { t.Fatal(err) - } else if health(o3) != .1 { - t.Fatalf("expected health to be .1, got %v", health(o3)) + } else if health(o3) != 1 { + t.Fatalf("expected health to be 1, got %v", health(o3)) } } @@ -4072,26 +4150,36 @@ func TestSlabCleanup(t *testing.T) { } // create buffered slab - bs := dbBufferedSlab{ - Filename: "foo", + bsID := uint(1) + if err := ss.db.Exec("INSERT INTO buffered_slabs (filename) VALUES ('foo');").Error; err != nil { + t.Fatal(err) } - if err := ss.db.Create(&bs).Error; err != nil { + + var dirID int64 + err := ss.bMain.Transaction(context.Background(), func(tx sql.DatabaseTx) error { + var err error + dirID, err = tx.MakeDirsForPath(context.Background(), "1") + return err + }) + if err != nil { t.Fatal(err) } // create objects obj1 := dbObject{ - ObjectID: "1", - DBBucketID: ss.DefaultBucketID(), - Health: 1, + DBDirectoryID: uint(dirID), + ObjectID: "1", + DBBucketID: ss.DefaultBucketID(), + Health: 1, } if err := ss.db.Create(&obj1).Error; err != nil { t.Fatal(err) } obj2 := dbObject{ - ObjectID: "2", - DBBucketID: ss.DefaultBucketID(), - Health: 1, + DBDirectoryID: uint(dirID), + ObjectID: "2", + DBBucketID: ss.DefaultBucketID(), + Health: 1, } if err := ss.db.Create(&obj2).Error; err != nil { t.Fatal(err) @@ -4126,7 +4214,7 @@ func TestSlabCleanup(t *testing.T) { } // delete the object - err := ss.RemoveObjectBlocking(context.Background(), api.DefaultBucketName, obj1.ObjectID) + err = ss.RemoveObjectBlocking(context.Background(), api.DefaultBucketName, obj1.ObjectID) if err != nil { t.Fatal(err) } @@ -4152,7 +4240,7 @@ func TestSlabCleanup(t *testing.T) { // create another object that references a slab with buffer ek, _ = 
object.GenerateEncryptionKey().MarshalBinary() bufferedSlab := dbSlab{ - DBBufferedSlabID: bs.ID, + DBBufferedSlabID: bsID, DBContractSet: cs, Health: 1, Key: ek, @@ -4162,9 +4250,10 @@ func TestSlabCleanup(t *testing.T) { t.Fatal(err) } obj3 := dbObject{ - ObjectID: "3", - DBBucketID: ss.DefaultBucketID(), - Health: 1, + DBDirectoryID: uint(dirID), + ObjectID: "3", + DBBucketID: ss.DefaultBucketID(), + Health: 1, } if err := ss.db.Create(&obj3).Error; err != nil { t.Fatal(err) @@ -4593,8 +4682,7 @@ func TestTypeCurrency(t *testing.T) { // same transaction, deadlocks become more likely due to the gap locks MySQL // uses. func TestUpdateObjectParallel(t *testing.T) { - dbURI, _, _, _ := DBConfigFromEnv() - if dbURI == "" { + if config.MySQLConfigFromEnv().URI == "" { // it's pretty much impossile to optimise for both sqlite and mysql at // the same time so we skip this test for SQLite for now // TODO: once we moved away from gorm and implement separate interfaces @@ -4762,3 +4850,91 @@ func TestFetchUsedContracts(t *testing.T) { t.Fatal("contracts should point to the renewed contract") } } + +func TestDirectories(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + objects := []string{ + "/foo", + "/bar/baz", + "///somefile", + "/dir/fakedir/", + "/", + "/bar/fileinsamedirasbefore", + } + + for _, o := range objects { + var dirID int64 + err := ss.bMain.Transaction(context.Background(), func(tx sql.DatabaseTx) error { + var err error + dirID, err = tx.MakeDirsForPath(context.Background(), o) + return err + }) + if err != nil { + t.Fatal(err) + } else if dirID == 0 { + t.Fatalf("unexpected dir id %v", dirID) + } + } + + expectedDirs := []struct { + name string + id uint + parentID uint + }{ + { + name: "/", + id: 1, + parentID: 0, + }, + { + name: "/bar/", + id: 2, + parentID: 1, + }, + { + name: "//", + id: 3, + parentID: 1, + }, + { + name: "///", + id: 4, + parentID: 3, + }, + { + name: "/dir/", + id: 2, + parentID: 1, + }, 
+ } + + var dbDirs []dbDirectory + if err := ss.db.Find(&dbDirs).Error; err != nil { + t.Fatal(err) + } else if len(dbDirs) != len(expectedDirs) { + t.Fatalf("expected %v dirs, got %v", len(expectedDirs), len(dbDirs)) + } + + for i, dbDir := range dbDirs { + if dbDir.ID != uint(i+1) { + t.Fatalf("unexpected id %v", dbDir.ID) + } else if dbDir.Name != expectedDirs[i].name { + t.Fatalf("unexpected name '%v' != '%v'", dbDir.Name, expectedDirs[i].name) + } + } + + now := time.Now() + ss.Retry(100, 100*time.Millisecond, func() error { + ss.triggerSlabPruning() + return ss.waitForPruneLoop(now) + }) + + var n int64 + if err := ss.db.Model(&dbDirectory{}).Count(&n).Error; err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal("expected 1 dir, got", n) + } +} diff --git a/stores/metrics.go b/stores/metrics.go index 8351ef43e..45003dfb9 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -2,673 +2,98 @@ package stores import ( "context" - "errors" - "fmt" - "math" - "math/bits" "time" - "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "gorm.io/gorm" - "gorm.io/gorm/clause" + sql "go.sia.tech/renterd/stores/sql" ) -const ( - contractMetricGranularity = 5 * time.Minute -) - -type ( - // dbContractMetric tracks information about a contract's funds. It is - // supposed to be reported by a worker every time a contract is revised. 
- dbContractMetric struct { - Model - - Timestamp unixTimeMS `gorm:"index;NOT NULL"` - - FCID fileContractID `gorm:"index;size:32;NOT NULL;column:fcid"` - Host publicKey `gorm:"index;size:32;NOT NULL"` - - RemainingCollateralLo unsigned64 `gorm:"index:idx_remaining_collateral;NOT NULL"` - RemainingCollateralHi unsigned64 `gorm:"index:idx_remaining_collateral;NOT NULL"` - RemainingFundsLo unsigned64 `gorm:"index:idx_remaining_funds;NOT NULL"` - RemainingFundsHi unsigned64 `gorm:"index:idx_remaining_funds;NOT NULL"` - RevisionNumber unsigned64 `gorm:"index;NOT NULL"` - - UploadSpendingLo unsigned64 `gorm:"index:idx_upload_spending;NOT NULL"` - UploadSpendingHi unsigned64 `gorm:"index:idx_upload_spending;NOT NULL"` - DownloadSpendingLo unsigned64 `gorm:"index:idx_download_spending;NOT NULL"` - DownloadSpendingHi unsigned64 `gorm:"index:idx_download_spending;NOT NULL"` - FundAccountSpendingLo unsigned64 `gorm:"index:idx_fund_account_spending;NOT NULL"` - FundAccountSpendingHi unsigned64 `gorm:"index:idx_fund_account_spending;NOT NULL"` - DeleteSpendingLo unsigned64 `gorm:"index:idx_delete_spending;NOT NULL"` - DeleteSpendingHi unsigned64 `gorm:"index:idx_delete_spending;NOT NULL"` - ListSpendingLo unsigned64 `gorm:"index:idx_list_spending;NOT NULL"` - ListSpendingHi unsigned64 `gorm:"index:idx_list_spending;NOT NULL"` - } - - // dbContractPruneMetric tracks information about contract pruning. Such as - // the number of bytes pruned, how much data there is left to prune and how - // long it took, along with potential errors that occurred while trying to - // prune the contract. 
- dbContractPruneMetric struct { - Model - - Timestamp unixTimeMS `gorm:"index;NOT NULL"` - - FCID fileContractID `gorm:"index;size:32;NOT NULL;column:fcid"` - Host publicKey `gorm:"index;size:32;NOT NULL"` - HostVersion string `gorm:"index"` - - Pruned unsigned64 `gorm:"index;NOT NULL"` - Remaining unsigned64 `gorm:"index;NOT NULL"` - Duration time.Duration `gorm:"index;NOT NULL"` - } - - // dbContractSetMetric tracks information about a specific contract set. - // Such as the number of contracts it contains. Intended to be reported by - // the bus every time the set is updated. - dbContractSetMetric struct { - Model - Timestamp unixTimeMS `gorm:"index;NOT NULL"` - - Name string `gorm:"index;NOT NULL"` - Contracts int `gorm:"index;NOT NULL"` - } - - // dbContractSetChurnMetric contains information about contracts being added - // to / removed from a contract set. Expected to be reported by the entity - // updating the set. e.g. the autopilot. - dbContractSetChurnMetric struct { - Model - Timestamp unixTimeMS `gorm:"index;NOT NULL"` - - Name string `gorm:"index;NOT NULL"` - FCID fileContractID `gorm:"index;size:32;NOT NULL"` - Direction string `gorm:"index;NOT NULL"` // "added" or "removed" - Reason string `gorm:"index;NOT NULL"` - } - - // dbPerformanceMetric is a generic metric used to track the performance of - // an action. Such an action could be a ReadSector operation. Expected to be - // reported by workers. - dbPerformanceMetric struct { - Model - Timestamp unixTimeMS `gorm:"index;NOT NULL"` - - Action string `gorm:"index;NOT NULL"` - Host publicKey `gorm:"index;size:32;NOT NULL"` - Origin string `gorm:"index;NOT NULL"` - Duration time.Duration `gorm:"index;NOT NULL"` - } - - // dbWalletMetric tracks information about a specific wallet. 
- dbWalletMetric struct { - Model - Timestamp unixTimeMS `gorm:"index;NOT NULL"` - - ConfirmedLo unsigned64 `gorm:"index:idx_confirmed;NOT NULL"` - ConfirmedHi unsigned64 `gorm:"index:idx_confirmed;NOT NULL"` - SpendableLo unsigned64 `gorm:"index:idx_spendable;NOT NULL"` - SpendableHi unsigned64 `gorm:"index:idx_spendable;NOT NULL"` - UnconfirmedLo unsigned64 `gorm:"index:idx_unconfirmed;NOT NULL"` - UnconfirmedHi unsigned64 `gorm:"index:idx_unconfirmed;NOT NULL"` - } -) - -func (dbContractMetric) TableName() string { return "contracts" } -func (dbContractPruneMetric) TableName() string { return "contract_prunes" } -func (dbContractSetMetric) TableName() string { return "contract_sets" } -func (dbContractSetChurnMetric) TableName() string { return "contract_sets_churn" } -func (dbPerformanceMetric) TableName() string { return "performance" } -func (dbWalletMetric) TableName() string { return "wallets" } - -func (s *SQLStore) ContractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) ([]api.ContractMetric, error) { - metrics, err := s.contractMetrics(ctx, start, n, interval, opts) - if err != nil { - return nil, err - } - resp := make([]api.ContractMetric, len(metrics)) - toCurr := func(lo, hi unsigned64) types.Currency { - return types.NewCurrency(uint64(lo), uint64(hi)) - } - for i := range resp { - resp[i] = api.ContractMetric{ - Timestamp: api.TimeRFC3339(time.Time(metrics[i].Timestamp).UTC()), - ContractID: types.FileContractID(metrics[i].FCID), - HostKey: types.PublicKey(metrics[i].Host), - RemainingCollateral: toCurr(metrics[i].RemainingCollateralLo, metrics[i].RemainingCollateralHi), - RemainingFunds: toCurr(metrics[i].RemainingFundsLo, metrics[i].RemainingFundsHi), - RevisionNumber: uint64(metrics[i].RevisionNumber), - UploadSpending: toCurr(metrics[i].UploadSpendingLo, metrics[i].UploadSpendingHi), - DownloadSpending: toCurr(metrics[i].DownloadSpendingLo, metrics[i].DownloadSpendingHi), - 
FundAccountSpending: toCurr(metrics[i].FundAccountSpendingLo, metrics[i].FundAccountSpendingHi), - DeleteSpending: toCurr(metrics[i].DeleteSpendingLo, metrics[i].DeleteSpendingHi), - ListSpending: toCurr(metrics[i].ListSpendingLo, metrics[i].ListSpendingHi), - } - } - return resp, nil +func (s *SQLStore) ContractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) (metrics []api.ContractMetric, err error) { + err = s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { + metrics, txErr = tx.ContractMetrics(ctx, start, n, interval, opts) + return + }) + return } -func (s *SQLStore) ContractPruneMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) ([]api.ContractPruneMetric, error) { - metrics, err := s.contractPruneMetrics(ctx, start, n, interval, opts) - if err != nil { - return nil, err - } - - resp := make([]api.ContractPruneMetric, len(metrics)) - for i := range resp { - resp[i] = api.ContractPruneMetric{ - Timestamp: api.TimeRFC3339(metrics[i].Timestamp), - - ContractID: types.FileContractID(metrics[i].FCID), - HostKey: types.PublicKey(metrics[i].Host), - HostVersion: metrics[i].HostVersion, - - Pruned: uint64(metrics[i].Pruned), - Remaining: uint64(metrics[i].Remaining), - Duration: metrics[i].Duration, - } - } - return resp, nil +func (s *SQLStore) ContractPruneMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) (metrics []api.ContractPruneMetric, err error) { + err = s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { + metrics, txErr = tx.ContractPruneMetrics(ctx, start, n, interval, opts) + return + }) + return } -func (s *SQLStore) ContractSetChurnMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) ([]api.ContractSetChurnMetric, error) { - metrics, 
err := s.contractSetChurnMetrics(ctx, start, n, interval, opts) - if err != nil { - return nil, err - } - resp := make([]api.ContractSetChurnMetric, len(metrics)) - for i := range resp { - resp[i] = api.ContractSetChurnMetric{ - Direction: metrics[i].Direction, - ContractID: types.FileContractID(metrics[i].FCID), - Name: metrics[i].Name, - Reason: metrics[i].Reason, - Timestamp: api.TimeRFC3339(time.Time(metrics[i].Timestamp).UTC()), - } - } - return resp, nil +func (s *SQLStore) ContractSetChurnMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) (metrics []api.ContractSetChurnMetric, err error) { + err = s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { + metrics, txErr = tx.ContractSetChurnMetrics(ctx, start, n, interval, opts) + return + }) + return } -func (s *SQLStore) ContractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) ([]api.ContractSetMetric, error) { - metrics, err := s.contractSetMetrics(ctx, start, n, interval, opts) - if err != nil { - return nil, err - } - resp := make([]api.ContractSetMetric, len(metrics)) - for i := range resp { - resp[i] = api.ContractSetMetric{ - Contracts: metrics[i].Contracts, - Name: metrics[i].Name, - Timestamp: api.TimeRFC3339(time.Time(metrics[i].Timestamp).UTC()), - } - } - return resp, nil +func (s *SQLStore) ContractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) (metrics []api.ContractSetMetric, err error) { + err = s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { + metrics, txErr = tx.ContractSetMetrics(ctx, start, n, interval, opts) + return + }) + return } -func (s *SQLStore) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) { - metrics, err 
:= s.performanceMetrics(ctx, start, n, interval, opts) - if err != nil { - return nil, err - } - resp := make([]api.PerformanceMetric, len(metrics)) - for i := range resp { - resp[i] = api.PerformanceMetric{ - Action: metrics[i].Action, - HostKey: types.PublicKey(metrics[i].Host), - Origin: metrics[i].Origin, - Duration: metrics[i].Duration, - Timestamp: api.TimeRFC3339(time.Time(metrics[i].Timestamp).UTC()), - } - } - return resp, nil +func (s *SQLStore) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) (metrics []api.PerformanceMetric, err error) { + err = s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { + metrics, txErr = tx.PerformanceMetrics(ctx, start, n, interval, opts) + return + }) + return } func (s *SQLStore) RecordContractMetric(ctx context.Context, metrics ...api.ContractMetric) error { - dbMetrics := make([]dbContractMetric, len(metrics)) - for i, metric := range metrics { - dbMetrics[i] = dbContractMetric{ - Timestamp: unixTimeMS(metric.Timestamp), - FCID: fileContractID(metric.ContractID), - Host: publicKey(metric.HostKey), - RemainingCollateralLo: unsigned64(metric.RemainingCollateral.Lo), - RemainingCollateralHi: unsigned64(metric.RemainingCollateral.Hi), - RemainingFundsLo: unsigned64(metric.RemainingFunds.Lo), - RemainingFundsHi: unsigned64(metric.RemainingFunds.Hi), - RevisionNumber: unsigned64(metric.RevisionNumber), - UploadSpendingLo: unsigned64(metric.UploadSpending.Lo), - UploadSpendingHi: unsigned64(metric.UploadSpending.Hi), - DownloadSpendingLo: unsigned64(metric.DownloadSpending.Lo), - DownloadSpendingHi: unsigned64(metric.DownloadSpending.Hi), - FundAccountSpendingLo: unsigned64(metric.FundAccountSpending.Lo), - FundAccountSpendingHi: unsigned64(metric.FundAccountSpending.Hi), - DeleteSpendingLo: unsigned64(metric.DeleteSpending.Lo), - DeleteSpendingHi: unsigned64(metric.DeleteSpending.Hi), - ListSpendingLo: 
unsigned64(metric.ListSpending.Lo), - ListSpendingHi: unsigned64(metric.ListSpending.Hi), - } - } - return s.dbMetrics.Transaction(func(tx *gorm.DB) error { - // delete any existing metric for the same contract that has happened - // within the same 5' window by diving the timestamp by 5' and use integer division. - for _, metric := range metrics { - intervalStart := metric.Timestamp.Std().Truncate(contractMetricGranularity) - intervalEnd := intervalStart.Add(contractMetricGranularity) - err := tx. - Where("timestamp >= ?", unixTimeMS(intervalStart)). - Where("timestamp < ?", unixTimeMS(intervalEnd)). - Where("fcid", fileContractID(metric.ContractID)). - Delete(&dbContractMetric{}). - Error - if err != nil { - return err - } - } - return tx.Create(&dbMetrics).Error + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.RecordContractMetric(ctx, metrics...) }) } func (s *SQLStore) RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error { - dbMetrics := make([]dbContractPruneMetric, len(metrics)) - for i, metric := range metrics { - dbMetrics[i] = dbContractPruneMetric{ - Timestamp: unixTimeMS(metric.Timestamp), - - FCID: fileContractID(metric.ContractID), - Host: publicKey(metric.HostKey), - HostVersion: metric.HostVersion, - - Pruned: unsigned64(metric.Pruned), - Remaining: unsigned64(metric.Remaining), - Duration: metric.Duration, - } - } - return s.dbMetrics.Transaction(func(tx *gorm.DB) error { - return tx.Create(&dbMetrics).Error + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.RecordContractPruneMetric(ctx, metrics...) 
}) } func (s *SQLStore) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error { - dbMetrics := make([]dbContractSetChurnMetric, len(metrics)) - for i, metric := range metrics { - dbMetrics[i] = dbContractSetChurnMetric{ - Direction: string(metric.Direction), - FCID: fileContractID(metric.ContractID), - Name: metric.Name, - Reason: metric.Reason, - Timestamp: unixTimeMS(metric.Timestamp), - } - } - return s.dbMetrics.Transaction(func(tx *gorm.DB) error { - return tx.Create(&dbMetrics).Error + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.RecordContractSetChurnMetric(ctx, metrics...) }) } func (s *SQLStore) RecordContractSetMetric(ctx context.Context, metrics ...api.ContractSetMetric) error { - dbMetrics := make([]dbContractSetMetric, len(metrics)) - for i, metric := range metrics { - dbMetrics[i] = dbContractSetMetric{ - Contracts: metric.Contracts, - Name: metric.Name, - Timestamp: unixTimeMS(metric.Timestamp), - } - } - return s.dbMetrics.Transaction(func(tx *gorm.DB) error { - return tx.Create(&dbMetrics).Error - }) -} - -func (s *SQLStore) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { - dbMetrics := make([]dbWalletMetric, len(metrics)) - for i, metric := range metrics { - dbMetrics[i] = dbWalletMetric{ - Timestamp: unixTimeMS(metric.Timestamp), - ConfirmedLo: unsigned64(metric.Confirmed.Lo), - ConfirmedHi: unsigned64(metric.Confirmed.Hi), - SpendableLo: unsigned64(metric.Spendable.Lo), - SpendableHi: unsigned64(metric.Spendable.Hi), - UnconfirmedLo: unsigned64(metric.Unconfirmed.Lo), - UnconfirmedHi: unsigned64(metric.Unconfirmed.Hi), - } - } - return s.dbMetrics.Transaction(func(tx *gorm.DB) error { - return tx.Create(&dbMetrics).Error + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.RecordContractSetMetric(ctx, metrics...) 
}) } func (s *SQLStore) RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error { - dbMetrics := make([]dbPerformanceMetric, len(metrics)) - for i, metric := range metrics { - dbMetrics[i] = dbPerformanceMetric{ - Action: metric.Action, - Duration: metric.Duration, - Host: publicKey(metric.HostKey), - Origin: metric.Origin, - Timestamp: unixTimeMS(metric.Timestamp), - } - } - return s.dbMetrics.Transaction(func(tx *gorm.DB) error { - return tx.Create(&dbMetrics).Error + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.RecordPerformanceMetric(ctx, metrics...) }) } -func (s *SQLStore) WalletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) ([]api.WalletMetric, error) { - metrics, err := s.walletMetrics(ctx, start, n, interval, opts) - if err != nil { - return nil, err - } - resp := make([]api.WalletMetric, len(metrics)) - toCurr := func(lo, hi unsigned64) types.Currency { - return types.NewCurrency(uint64(lo), uint64(hi)) - } - for i := range resp { - resp[i] = api.WalletMetric{ - Timestamp: api.TimeRFC3339(time.Time(metrics[i].Timestamp).UTC()), - Confirmed: toCurr(metrics[i].ConfirmedLo, metrics[i].ConfirmedHi), - Spendable: toCurr(metrics[i].SpendableLo, metrics[i].SpendableHi), - Unconfirmed: toCurr(metrics[i].UnconfirmedLo, metrics[i].UnconfirmedHi), - } - } - return resp, nil +func (s *SQLStore) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.RecordWalletMetric(ctx, metrics...) 
+ }) } -func (m dbContractMetric) Aggregate(o dbContractMetric) (out dbContractMetric) { - out = m - remainingCollateralLo, carry := bits.Add64(uint64(m.RemainingCollateralLo), uint64(o.RemainingCollateralLo), 0) - remainingCollateralHi, _ := bits.Add64(uint64(m.RemainingCollateralHi), uint64(o.RemainingCollateralHi), carry) - remainingFundsLo, carry := bits.Add64(uint64(m.RemainingFundsLo), uint64(o.RemainingFundsLo), 0) - remainingFundsHi, _ := bits.Add64(uint64(m.RemainingFundsHi), uint64(o.RemainingFundsHi), carry) - uploadSpendingLo, carry := bits.Add64(uint64(m.UploadSpendingLo), uint64(o.UploadSpendingLo), 0) - uploadSpendingHi, _ := bits.Add64(uint64(m.UploadSpendingHi), uint64(o.UploadSpendingHi), carry) - downloadSpendingLo, carry := bits.Add64(uint64(m.DownloadSpendingLo), uint64(o.DownloadSpendingLo), 0) - downloadSpendingHi, _ := bits.Add64(uint64(m.DownloadSpendingHi), uint64(o.DownloadSpendingHi), carry) - fundAccountSpendingLo, carry := bits.Add64(uint64(m.FundAccountSpendingLo), uint64(o.FundAccountSpendingLo), 0) - fundAccountSpendingHi, _ := bits.Add64(uint64(m.FundAccountSpendingHi), uint64(o.FundAccountSpendingHi), carry) - deleteSpendingLo, carry := bits.Add64(uint64(m.DeleteSpendingLo), uint64(o.DeleteSpendingLo), 0) - deleteSpendingHi, _ := bits.Add64(uint64(m.DeleteSpendingHi), uint64(o.DeleteSpendingHi), carry) - listSpendingLo, carry := bits.Add64(uint64(m.ListSpendingLo), uint64(o.ListSpendingLo), 0) - listSpendingHi, _ := bits.Add64(uint64(m.ListSpendingHi), uint64(o.ListSpendingHi), carry) - - out.RemainingCollateralLo = unsigned64(remainingCollateralLo) - out.RemainingCollateralHi = unsigned64(remainingCollateralHi) - out.RemainingFundsLo = unsigned64(remainingFundsLo) - out.RemainingFundsHi = unsigned64(remainingFundsHi) - out.UploadSpendingLo = unsigned64(uploadSpendingLo) - out.UploadSpendingHi = unsigned64(uploadSpendingHi) - out.DownloadSpendingLo = unsigned64(downloadSpendingLo) - out.DownloadSpendingHi = 
unsigned64(downloadSpendingHi) - out.FundAccountSpendingLo = unsigned64(fundAccountSpendingLo) - out.FundAccountSpendingHi = unsigned64(fundAccountSpendingHi) - out.DeleteSpendingLo = unsigned64(deleteSpendingLo) - out.DeleteSpendingHi = unsigned64(deleteSpendingHi) - out.ListSpendingLo = unsigned64(listSpendingLo) - out.ListSpendingHi = unsigned64(listSpendingHi) +func (s *SQLStore) WalletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) (metrics []api.WalletMetric, err error) { + err = s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { + metrics, txErr = tx.WalletMetrics(ctx, start, n, interval, opts) + return + }) return } func (s *SQLStore) PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error { - if metric == "" { - return errors.New("metric must be set") - } else if cutoff.IsZero() { - return errors.New("cutoff time must be set") - } - var model interface{} - switch metric { - case api.MetricContractPrune: - model = &dbContractPruneMetric{} - case api.MetricContractSet: - model = &dbContractSetMetric{} - case api.MetricContractSetChurn: - model = &dbContractSetChurnMetric{} - case api.MetricContract: - model = &dbContractMetric{} - case api.MetricPerformance: - model = &dbPerformanceMetric{} - case api.MetricWallet: - model = &dbWalletMetric{} - default: - return fmt.Errorf("unknown metric '%s'", metric) - } - return s.dbMetrics.Model(model). - Where("timestamp < ?", unixTimeMS(cutoff)). - Delete(model). - Error -} - -func (s *SQLStore) contractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) ([]dbContractMetric, error) { - whereExpr := gorm.Expr("TRUE") - if opts.ContractID != (types.FileContractID{}) { - whereExpr = gorm.Expr("? AND fcid = ?", whereExpr, fileContractID(opts.ContractID)) - } - if opts.HostKey != (types.PublicKey{}) { - whereExpr = gorm.Expr("? 
AND host = ?", whereExpr, publicKey(opts.HostKey)) - } - - var metrics []dbContractMetric - var err error - if opts.ContractID == (types.FileContractID{}) && opts.HostKey == (types.PublicKey{}) { - // if neither contract nor host filters were set, we return the - // aggregate spending for each period - metrics, err = s.findAggregatedContractPeriods(ctx, start, n, interval) - } else { - // otherwise we return the first metric for each period like we usually - // do - err = s.findPeriods(ctx, dbContractMetric{}.TableName(), &metrics, start, n, interval, whereExpr) - } - if err != nil { - return nil, fmt.Errorf("failed to fetch contract metrics: %w", err) - } - for i, m := range metrics { - metrics[i].Timestamp = normaliseTimestamp(start, interval, m.Timestamp) - } - return metrics, nil -} - -func (s *SQLStore) contractPruneMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) ([]dbContractPruneMetric, error) { - whereExpr := gorm.Expr("TRUE") - if opts.ContractID != (types.FileContractID{}) { - whereExpr = gorm.Expr("? AND fcid = ?", whereExpr, fileContractID(opts.ContractID)) - } - if opts.HostKey != (types.PublicKey{}) { - whereExpr = gorm.Expr("? AND host = ?", whereExpr, publicKey(opts.HostKey)) - } - if opts.HostVersion != "" { - whereExpr = gorm.Expr("? AND host_version = ?", whereExpr, opts.HostVersion) - } - - var metrics []dbContractPruneMetric - err := s.findPeriods(ctx, dbContractPruneMetric{}.TableName(), &metrics, start, n, interval, whereExpr) - if err != nil { - return nil, fmt.Errorf("failed to fetch contract metrics: %w", err) - } - - return metrics, nil -} - -func (s *SQLStore) contractSetChurnMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) ([]dbContractSetChurnMetric, error) { - whereExpr := gorm.Expr("TRUE") - if opts.Name != "" { - whereExpr = gorm.Expr("? 
AND name = ?", whereExpr, opts.Name) - } - if opts.Direction != "" { - whereExpr = gorm.Expr("? AND direction = ?", whereExpr, opts.Direction) - } - if opts.Reason != "" { - whereExpr = gorm.Expr("? AND reason = ?", whereExpr, opts.Reason) - } - var metrics []dbContractSetChurnMetric - err := s.findPeriods(ctx, dbContractSetChurnMetric{}.TableName(), &metrics, start, n, interval, whereExpr) - if err != nil { - return nil, fmt.Errorf("failed to fetch contract set churn metrics: %w", err) - } - for i, m := range metrics { - metrics[i].Timestamp = normaliseTimestamp(start, interval, m.Timestamp) - } - return metrics, nil -} - -func (s *SQLStore) contractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) ([]dbContractSetMetric, error) { - whereExpr := gorm.Expr("TRUE") - if opts.Name != "" { - whereExpr = gorm.Expr("name = ?", opts.Name) - } - - var metrics []dbContractSetMetric - err := s.findPeriods(ctx, dbContractSetMetric{}.TableName(), &metrics, start, n, interval, whereExpr) - if err != nil { - return nil, fmt.Errorf("failed to fetch contract set metrics: %w", err) - } - for i, m := range metrics { - metrics[i].Timestamp = normaliseTimestamp(start, interval, m.Timestamp) - } - return metrics, nil -} - -func normaliseTimestamp(start time.Time, interval time.Duration, t unixTimeMS) unixTimeMS { - startMS := start.UnixMilli() - toNormaliseMS := time.Time(t).UnixMilli() - intervalMS := interval.Milliseconds() - if startMS > toNormaliseMS { - return unixTimeMS(start) - } - normalizedMS := (toNormaliseMS-startMS)/intervalMS*intervalMS + start.UnixMilli() - return unixTimeMS(time.UnixMilli(normalizedMS)) -} - -func (s *SQLStore) findAggregatedContractPeriods(ctx context.Context, start time.Time, n uint64, interval time.Duration) ([]dbContractMetric, error) { - if n > api.MetricMaxIntervals { - return nil, api.ErrMaxIntervalsExceeded - } - end := start.Add(time.Duration(n) * interval) - - type 
metricWithPeriod struct { - Metric dbContractMetric `gorm:"embedded"` - Period int64 - } - var metricsWithPeriod []metricWithPeriod - - err := s.dbMetrics.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - var fcids []fileContractID - if err := tx.Raw("SELECT DISTINCT fcid FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ?", unixTimeMS(start), unixTimeMS(end)). - Scan(&fcids).Error; err != nil { - return fmt.Errorf("failed to fetch distinct contract ids: %w", err) - } - - var indexHint string - if !isSQLite(tx) { - indexHint = "USE INDEX (idx_contracts_fcid_timestamp)" - } - - for intervalStart := start; intervalStart.Before(end); intervalStart = intervalStart.Add(interval) { - intervalEnd := intervalStart.Add(interval) - for _, fcid := range fcids { - var metrics []dbContractMetric - err := tx.Raw(fmt.Sprintf("SELECT * FROM contracts %s WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", indexHint), unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). 
- Scan(&metrics).Error - if err != nil { - return fmt.Errorf("failed to fetch contract metrics: %w", err) - } else if len(metrics) == 0 { - continue - } - metricsWithPeriod = append(metricsWithPeriod, metricWithPeriod{ - Metric: metrics[0], - Period: intervalStart.UnixMilli(), - }) - } - } - return nil + return s.bMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { + return tx.PruneMetrics(ctx, metric, cutoff) }) - if err != nil { - return nil, err - } - - currentPeriod := int64(math.MinInt64) - var metrics []dbContractMetric - for _, m := range metricsWithPeriod { - m.Metric.FCID = fileContractID{} - m.Metric.Host = publicKey{} - m.Metric.RevisionNumber = 0 - if m.Period != currentPeriod { - metrics = append(metrics, m.Metric) - currentPeriod = m.Period - } else { - metrics[len(metrics)-1] = metrics[len(metrics)-1].Aggregate(m.Metric) - } - } - return metrics, nil -} - -// findPeriods is the core of all methods retrieving metrics. By using integer -// division rounding combined with a GROUP BY operation, all rows of a table are -// split into intervals and the row with the lowest timestamp for each interval -// is returned. The result is then joined with the original table to retrieve -// only the metrics we want. -func (s *SQLStore) findPeriods(ctx context.Context, table string, dst interface{}, start time.Time, n uint64, interval time.Duration, whereExpr clause.Expr) error { - if n > api.MetricMaxIntervals { - return api.ErrMaxIntervalsExceeded - } - end := start.Add(time.Duration(n) * interval) - return s.dbMetrics.WithContext(ctx).Raw(fmt.Sprintf(` - WITH RECURSIVE periods AS ( - SELECT ? AS period_start - UNION ALL - SELECT period_start + ? - FROM periods - WHERE period_start < ? - ? - ) - SELECT %s.* FROM %s - INNER JOIN ( - SELECT - p.period_start as Period, - MIN(obj.id) AS id - FROM - periods p - INNER JOIN - %s obj ON obj.timestamp >= p.period_start AND obj.timestamp < p.period_start + ? - WHERE ? 
- GROUP BY - p.period_start - ) i ON %s.id = i.id ORDER BY Period ASC - `, table, table, table, table), - unixTimeMS(start), - interval.Milliseconds(), - unixTimeMS(end), - interval.Milliseconds(), - interval.Milliseconds(), - whereExpr, - ).Scan(dst). - Error -} - -func (s *SQLStore) walletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) (metrics []dbWalletMetric, err error) { - err = s.findPeriods(ctx, dbWalletMetric{}.TableName(), &metrics, start, n, interval, gorm.Expr("TRUE")) - if err != nil { - return nil, fmt.Errorf("failed to fetch wallet metrics: %w", err) - } - for i, m := range metrics { - metrics[i].Timestamp = normaliseTimestamp(start, interval, m.Timestamp) - } - return -} - -func (s *SQLStore) performanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]dbPerformanceMetric, error) { - whereExpr := gorm.Expr("TRUE") - if opts.Action != "" { - whereExpr = gorm.Expr("? AND action = ?", whereExpr, opts.Action) - } - if opts.HostKey != (types.PublicKey{}) { - whereExpr = gorm.Expr("? AND host = ?", whereExpr, publicKey(opts.HostKey)) - } - if opts.Origin != "" { - whereExpr = gorm.Expr("? 
AND origin = ?", whereExpr, opts.Origin) - } - - var metrics []dbPerformanceMetric - err := s.findPeriods(ctx, dbPerformanceMetric{}.TableName(), &metrics, start, n, interval, whereExpr) - if err != nil { - return nil, fmt.Errorf("failed to fetch performance metrics: %w", err) - } - - return metrics, nil } diff --git a/stores/metrics_test.go b/stores/metrics_test.go index ec97099ba..5725f7a65 100644 --- a/stores/metrics_test.go +++ b/stores/metrics_test.go @@ -8,42 +8,226 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "go.sia.tech/core/types" "go.sia.tech/renterd/api" "lukechampine.com/frand" ) -func TestNormaliseTimestamp(t *testing.T) { - tests := []struct { - start time.Time - interval time.Duration - ti time.Time - result time.Time - }{ - { - start: time.UnixMilli(100), - interval: 10 * time.Millisecond, - ti: time.UnixMilli(105), - result: time.UnixMilli(100), - }, - { - start: time.UnixMilli(100), - interval: 10 * time.Millisecond, - ti: time.UnixMilli(115), - result: time.UnixMilli(110), - }, - { - start: time.UnixMilli(100), - interval: 10 * time.Millisecond, - ti: time.UnixMilli(125), - result: time.UnixMilli(120), - }, +func TestContractChurnSetMetrics(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // Create metrics to query. 
+ sets := []string{"foo", "bar"} + directions := []string{api.ChurnDirAdded, api.ChurnDirRemoved} + reasons := []string{"reasonA", "reasonB"} + times := []time.Time{time.UnixMilli(3), time.UnixMilli(1), time.UnixMilli(2)} + var i byte + for _, set := range sets { + for _, dir := range directions { + for _, reason := range reasons { + for _, recordedTime := range times { + fcid := types.FileContractID{i} + if err := ss.RecordContractSetChurnMetric(context.Background(), api.ContractSetChurnMetric{ + Timestamp: api.TimeRFC3339(recordedTime), + Name: set, + Direction: dir, + Reason: reason, + ContractID: fcid, + }); err != nil { + t.Fatal(err) + } + i++ + } + } + } } - for _, test := range tests { - if result := time.Time(normaliseTimestamp(test.start, test.interval, unixTimeMS(test.ti))); !result.Equal(test.result) { - t.Fatalf("expected %v, got %v", test.result, result) + assertMetrics := func(start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts, expected int, cmp func(api.ContractSetChurnMetric)) { + t.Helper() + metrics, err := ss.ContractSetChurnMetrics(context.Background(), start, n, interval, opts) + if err != nil { + t.Fatal(err) + } + if len(metrics) != expected { + t.Fatalf("expected %v metrics, got %v", expected, len(metrics)) + } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { + return time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) + }) { + t.Fatal("expected metrics to be sorted by time") + } + for _, m := range metrics { + cmp(m) + } + } + + // Query without any filters. + start := time.UnixMilli(1) + assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{}, 3, func(m api.ContractSetChurnMetric) {}) + + // Query by set name. 
+ assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{Name: sets[0]}, 3, func(m api.ContractSetChurnMetric) { + if m.Name != sets[0] { + t.Fatalf("expected name to be %v, got %v", sets[0], m.Name) + } + }) + + // Query by direction. + assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{Direction: directions[0]}, 3, func(m api.ContractSetChurnMetric) { + if m.Direction != directions[0] { + t.Fatalf("expected direction to be %v, got %v", directions[1], m.Direction) + } + }) + + // Query by reason. + assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{Reason: reasons[0]}, 3, func(m api.ContractSetChurnMetric) { + if m.Reason != reasons[0] { + t.Fatalf("expected reason to be %v, got %v", reasons[0], m.Reason) } + }) + + // Prune metrics + if err := ss.PruneMetrics(context.Background(), api.MetricContractSetChurn, time.UnixMilli(3)); err != nil { + t.Fatal(err) + } else if metrics, err := ss.ContractSetChurnMetrics(context.Background(), time.UnixMilli(1), 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{}); err != nil { + t.Fatal(err) + } else if len(metrics) != 1 { + t.Fatalf("expected 1 metric, got %v", len(metrics)) + } +} + +func TestContractMetrics(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // Create metrics to query. 
+ hosts := []types.PublicKey{types.GeneratePrivateKey().PublicKey(), types.GeneratePrivateKey().PublicKey()} + times := []time.Time{time.UnixMilli(3), time.UnixMilli(1), time.UnixMilli(2)} + var i byte + fcid2Metric := make(map[types.FileContractID]api.ContractMetric) + var metricsTimeAsc []api.ContractMetric + for _, host := range hosts { + for _, recordedTime := range times { + metric := api.ContractMetric{ + Timestamp: api.TimeRFC3339(recordedTime), + ContractID: types.FileContractID{i}, + HostKey: host, + RemainingCollateral: types.MaxCurrency, + RemainingFunds: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), + RevisionNumber: math.MaxUint64, + UploadSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), + DownloadSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), + FundAccountSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), + DeleteSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), + ListSpending: types.NewCurrency64(1), + } + fcid2Metric[metric.ContractID] = metric + metricsTimeAsc = append(metricsTimeAsc, metric) + if err := ss.RecordContractMetric(context.Background(), metric); err != nil { + t.Fatal(err) + } + i++ + } + } + sort.SliceStable(metricsTimeAsc, func(i, j int) bool { + return metricsTimeAsc[i].Timestamp.Std().UnixMilli() < metricsTimeAsc[j].Timestamp.Std().UnixMilli() + }) + + assertMetrics := func(start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts, expected int, cmpFn func(api.ContractMetric)) { + t.Helper() + metrics, err := ss.ContractMetrics(context.Background(), start, n, interval, opts) + if err != nil { + t.Fatal(err) + } + if len(metrics) != expected { + t.Fatalf("expected %v metrics, got %v", expected, len(metrics)) + } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { + return 
time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) + }) { + t.Fatal("expected metrics to be sorted by time") + } + for _, m := range metrics { + expectedMetric := fcid2Metric[m.ContractID] + expectedMetric.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, unixTimeMS(expectedMetric.Timestamp))) + if !cmp.Equal(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339)) { + t.Fatal("unexpected metric", cmp.Diff(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339))) + } + cmpFn(m) + } + } + + // Query by host. + start := time.UnixMilli(1) + assertMetrics(start, 3, time.Millisecond, api.ContractMetricsQueryOpts{HostKey: hosts[0]}, 3, func(m api.ContractMetric) { + if m.HostKey != hosts[0] { + t.Fatalf("expected host to be %v, got %v", hosts[0], m.HostKey) + } + }) + + // Query by fcid. + fcid := types.FileContractID{2} + assertMetrics(start, 3, time.Millisecond, api.ContractMetricsQueryOpts{ContractID: fcid}, 1, func(m api.ContractMetric) { + if m.ContractID != fcid { + t.Fatalf("expected fcid to be %v, got %v", fcid, m.ContractID) + } + }) + // Query without any filters. This will cause aggregate values to be returned. 
+ metrics, err := ss.ContractMetrics(context.Background(), start, 3, time.Millisecond, api.ContractMetricsQueryOpts{}) + if err != nil { + t.Fatal(err) + } else if len(metrics) != 3 { + t.Fatalf("expected 3 metrics, got %v", len(metrics)) + } + for i, m := range metrics { + var expectedMetric api.ContractMetric + expectedMetric.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, time.Millisecond, unixTimeMS(metricsTimeAsc[2*i].Timestamp))) + expectedMetric.ContractID = types.FileContractID{} + expectedMetric.HostKey = types.PublicKey{} + expectedMetric.RemainingCollateral, _ = metricsTimeAsc[2*i].RemainingCollateral.AddWithOverflow(metricsTimeAsc[2*i+1].RemainingCollateral) + expectedMetric.RemainingFunds, _ = metricsTimeAsc[2*i].RemainingFunds.AddWithOverflow(metricsTimeAsc[2*i+1].RemainingFunds) + expectedMetric.RevisionNumber = 0 + expectedMetric.UploadSpending, _ = metricsTimeAsc[2*i].UploadSpending.AddWithOverflow(metricsTimeAsc[2*i+1].UploadSpending) + expectedMetric.DownloadSpending, _ = metricsTimeAsc[2*i].DownloadSpending.AddWithOverflow(metricsTimeAsc[2*i+1].DownloadSpending) + expectedMetric.FundAccountSpending, _ = metricsTimeAsc[2*i].FundAccountSpending.AddWithOverflow(metricsTimeAsc[2*i+1].FundAccountSpending) + expectedMetric.DeleteSpending, _ = metricsTimeAsc[2*i].DeleteSpending.AddWithOverflow(metricsTimeAsc[2*i+1].DeleteSpending) + expectedMetric.ListSpending, _ = metricsTimeAsc[2*i].ListSpending.AddWithOverflow(metricsTimeAsc[2*i+1].ListSpending) + if !cmp.Equal(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339)) { + t.Fatal(i, "unexpected metric", cmp.Diff(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339))) + } + } + + // Prune metrics + if err := ss.PruneMetrics(context.Background(), api.MetricContract, time.UnixMilli(3)); err != nil { + t.Fatal(err) + } else if metrics, err := ss.ContractMetrics(context.Background(), time.UnixMilli(1), 3, time.Millisecond, api.ContractMetricsQueryOpts{}); err != nil { + t.Fatal(err) + } else if 
len(metrics) != 1 { + t.Fatalf("expected 1 metric, got %v", len(metrics)) + } + + // Drop all metrics. + if err := ss.PruneMetrics(context.Background(), api.MetricContract, time.Now()); err != nil { + t.Fatal(err) + } + + // Record multiple metrics for the same contract - one per second over 10 minutes + for i := int64(0); i < 600; i++ { + err := ss.RecordContractMetric(context.Background(), api.ContractMetric{ + ContractID: types.FileContractID{1}, + Timestamp: api.TimeRFC3339(time.Unix(i, 0)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Check how many metrics were recorded. + var n int64 + if err := ss.DBMetrics().QueryRow(context.Background(), "SELECT COUNT(*) FROM contracts").Scan(&n); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatalf("expected 2 metrics, got %v", n) } } @@ -92,8 +276,8 @@ func TestContractPruneMetrics(t *testing.T) { t.Fatal("expected metrics to be sorted by time") } for _, m := range metrics { - if !cmp.Equal(m, fcid2Metric[m.ContractID], cmp.Comparer(api.CompareTimeRFC3339)) { - t.Fatal("unexpected metric", cmp.Diff(m, fcid2Metric[m.ContractID])) + if !cmp.Equal(m, fcid2Metric[m.ContractID], cmpopts.IgnoreUnexported(api.ContractPruneMetric{}), cmp.Comparer(api.CompareTimeRFC3339)) { + t.Fatal("unexpected metric", m, fcid2Metric[m.ContractID]) } cmpFn(m) } @@ -148,7 +332,7 @@ func TestContractSetMetrics(t *testing.T) { } else if m := metrics[0]; m.Contracts != 0 { t.Fatalf("expected 0 contracts, got %v", m.Contracts) } else if ti := time.Time(m.Timestamp); !ti.Equal(testStart) { - t.Fatal("expected time to match start time") + t.Fatalf("expected time to match start time, %v != %v", ti, testStart) } else if m.Name != testContractSet { t.Fatalf("expected name to be %v, got %v", testContractSet, m.Name) } @@ -212,86 +396,37 @@ func TestContractSetMetrics(t *testing.T) { } } -func TestContractChurnSetMetrics(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - - // Create metrics to query. 
- sets := []string{"foo", "bar"} - directions := []string{api.ChurnDirAdded, api.ChurnDirRemoved} - reasons := []string{"reasonA", "reasonB"} - times := []time.Time{time.UnixMilli(3), time.UnixMilli(1), time.UnixMilli(2)} - var i byte - for _, set := range sets { - for _, dir := range directions { - for _, reason := range reasons { - for _, recordedTime := range times { - fcid := types.FileContractID{i} - if err := ss.RecordContractSetChurnMetric(context.Background(), api.ContractSetChurnMetric{ - Timestamp: api.TimeRFC3339(recordedTime), - Name: set, - Direction: dir, - Reason: reason, - ContractID: fcid, - }); err != nil { - t.Fatal(err) - } - i++ - } - } - } - } - - assertMetrics := func(start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts, expected int, cmp func(api.ContractSetChurnMetric)) { - t.Helper() - metrics, err := ss.ContractSetChurnMetrics(context.Background(), start, n, interval, opts) - if err != nil { - t.Fatal(err) - } - if len(metrics) != expected { - t.Fatalf("expected %v metrics, got %v", expected, len(metrics)) - } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { - return time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) - }) { - t.Fatal("expected metrics to be sorted by time") - } - for _, m := range metrics { - cmp(m) - } +func TestNormaliseTimestamp(t *testing.T) { + tests := []struct { + start time.Time + interval time.Duration + ti time.Time + result time.Time + }{ + { + start: time.UnixMilli(100), + interval: 10 * time.Millisecond, + ti: time.UnixMilli(105), + result: time.UnixMilli(100), + }, + { + start: time.UnixMilli(100), + interval: 10 * time.Millisecond, + ti: time.UnixMilli(115), + result: time.UnixMilli(110), + }, + { + start: time.UnixMilli(100), + interval: 10 * time.Millisecond, + ti: time.UnixMilli(125), + result: time.UnixMilli(120), + }, } - // Query without any filters. 
- start := time.UnixMilli(1) - assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{}, 3, func(m api.ContractSetChurnMetric) {}) - - // Query by set name. - assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{Name: sets[0]}, 3, func(m api.ContractSetChurnMetric) { - if m.Name != sets[0] { - t.Fatalf("expected name to be %v, got %v", sets[0], m.Name) - } - }) - - // Query by direction. - assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{Direction: directions[0]}, 3, func(m api.ContractSetChurnMetric) { - if m.Direction != directions[0] { - t.Fatalf("expected direction to be %v, got %v", directions[1], m.Direction) - } - }) - - // Query by reason. - assertMetrics(start, 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{Reason: reasons[0]}, 3, func(m api.ContractSetChurnMetric) { - if m.Reason != reasons[0] { - t.Fatalf("expected reason to be %v, got %v", reasons[0], m.Reason) + for _, test := range tests { + if result := time.Time(normaliseTimestamp(test.start, test.interval, unixTimeMS(test.ti))); !result.Equal(test.result) { + t.Fatalf("expected %v, got %v", test.result, result) } - }) - - // Prune metrics - if err := ss.PruneMetrics(context.Background(), api.MetricContractSetChurn, time.UnixMilli(3)); err != nil { - t.Fatal(err) - } else if metrics, err := ss.ContractSetChurnMetrics(context.Background(), time.UnixMilli(1), 3, time.Millisecond, api.ContractSetChurnMetricsQueryOpts{}); err != nil { - t.Fatal(err) - } else if len(metrics) != 1 { - t.Fatalf("expected 1 metric, got %v", len(metrics)) } } @@ -380,140 +515,6 @@ func TestPerformanceMetrics(t *testing.T) { } } -func TestContractMetrics(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - - // Create metrics to query. 
- hosts := []types.PublicKey{types.GeneratePrivateKey().PublicKey(), types.GeneratePrivateKey().PublicKey()} - times := []time.Time{time.UnixMilli(3), time.UnixMilli(1), time.UnixMilli(2)} - var i byte - fcid2Metric := make(map[types.FileContractID]api.ContractMetric) - var metricsTimeAsc []api.ContractMetric - for _, host := range hosts { - for _, recordedTime := range times { - metric := api.ContractMetric{ - Timestamp: api.TimeRFC3339(recordedTime), - ContractID: types.FileContractID{i}, - HostKey: host, - RemainingCollateral: types.MaxCurrency, - RemainingFunds: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), - RevisionNumber: math.MaxUint64, - UploadSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), - DownloadSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), - FundAccountSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), - DeleteSpending: types.NewCurrency(frand.Uint64n(math.MaxUint64), frand.Uint64n(math.MaxUint64)), - ListSpending: types.NewCurrency64(1), - } - fcid2Metric[metric.ContractID] = metric - metricsTimeAsc = append(metricsTimeAsc, metric) - if err := ss.RecordContractMetric(context.Background(), metric); err != nil { - t.Fatal(err) - } - i++ - } - } - sort.SliceStable(metricsTimeAsc, func(i, j int) bool { - return metricsTimeAsc[i].Timestamp.Std().UnixMilli() < metricsTimeAsc[j].Timestamp.Std().UnixMilli() - }) - - assertMetrics := func(start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts, expected int, cmpFn func(api.ContractMetric)) { - t.Helper() - metrics, err := ss.ContractMetrics(context.Background(), start, n, interval, opts) - if err != nil { - t.Fatal(err) - } - if len(metrics) != expected { - t.Fatalf("expected %v metrics, got %v", expected, len(metrics)) - } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { - return 
time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) - }) { - t.Fatal("expected metrics to be sorted by time") - } - for _, m := range metrics { - expectedMetric := fcid2Metric[m.ContractID] - expectedMetric.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, unixTimeMS(expectedMetric.Timestamp))) - if !cmp.Equal(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339)) { - t.Fatal("unexpected metric", cmp.Diff(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339))) - } - cmpFn(m) - } - } - - // Query by host. - start := time.UnixMilli(1) - assertMetrics(start, 3, time.Millisecond, api.ContractMetricsQueryOpts{HostKey: hosts[0]}, 3, func(m api.ContractMetric) { - if m.HostKey != hosts[0] { - t.Fatalf("expected host to be %v, got %v", hosts[0], m.HostKey) - } - }) - - // Query by fcid. - fcid := types.FileContractID{2} - assertMetrics(start, 3, time.Millisecond, api.ContractMetricsQueryOpts{ContractID: fcid}, 1, func(m api.ContractMetric) { - if m.ContractID != fcid { - t.Fatalf("expected fcid to be %v, got %v", fcid, m.ContractID) - } - }) - // Query without any filters. This will cause aggregate values to be returned. 
- metrics, err := ss.ContractMetrics(context.Background(), start, 3, time.Millisecond, api.ContractMetricsQueryOpts{}) - if err != nil { - t.Fatal(err) - } else if len(metrics) != 3 { - t.Fatalf("expected 3 metrics, got %v", len(metrics)) - } - for i, m := range metrics { - var expectedMetric api.ContractMetric - expectedMetric.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, time.Millisecond, unixTimeMS(metricsTimeAsc[2*i].Timestamp))) - expectedMetric.ContractID = types.FileContractID{} - expectedMetric.HostKey = types.PublicKey{} - expectedMetric.RemainingCollateral, _ = metricsTimeAsc[2*i].RemainingCollateral.AddWithOverflow(metricsTimeAsc[2*i+1].RemainingCollateral) - expectedMetric.RemainingFunds, _ = metricsTimeAsc[2*i].RemainingFunds.AddWithOverflow(metricsTimeAsc[2*i+1].RemainingFunds) - expectedMetric.RevisionNumber = 0 - expectedMetric.UploadSpending, _ = metricsTimeAsc[2*i].UploadSpending.AddWithOverflow(metricsTimeAsc[2*i+1].UploadSpending) - expectedMetric.DownloadSpending, _ = metricsTimeAsc[2*i].DownloadSpending.AddWithOverflow(metricsTimeAsc[2*i+1].DownloadSpending) - expectedMetric.FundAccountSpending, _ = metricsTimeAsc[2*i].FundAccountSpending.AddWithOverflow(metricsTimeAsc[2*i+1].FundAccountSpending) - expectedMetric.DeleteSpending, _ = metricsTimeAsc[2*i].DeleteSpending.AddWithOverflow(metricsTimeAsc[2*i+1].DeleteSpending) - expectedMetric.ListSpending, _ = metricsTimeAsc[2*i].ListSpending.AddWithOverflow(metricsTimeAsc[2*i+1].ListSpending) - if !cmp.Equal(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339)) { - t.Fatal(i, "unexpected metric", cmp.Diff(m, expectedMetric, cmp.Comparer(api.CompareTimeRFC3339))) - } - } - - // Prune metrics - if err := ss.PruneMetrics(context.Background(), api.MetricContract, time.UnixMilli(3)); err != nil { - t.Fatal(err) - } else if metrics, err := ss.ContractMetrics(context.Background(), time.UnixMilli(1), 3, time.Millisecond, api.ContractMetricsQueryOpts{}); err != nil { - t.Fatal(err) - } else if 
len(metrics) != 1 { - t.Fatalf("expected 1 metric, got %v", len(metrics)) - } - - // Drop all metrics. - if err := ss.dbMetrics.Where("TRUE").Delete(&dbContractMetric{}).Error; err != nil { - t.Fatal(err) - } - - // Record multiple metrics for the same contract - one per second over 10 minutes - for i := int64(0); i < 600; i++ { - err := ss.RecordContractMetric(context.Background(), api.ContractMetric{ - ContractID: types.FileContractID{1}, - Timestamp: api.TimeRFC3339(time.Unix(i, 0)), - }) - if err != nil { - t.Fatal(err) - } - } - - // Check how many metrics were recorded. - var n int64 - if err := ss.dbMetrics.Model(&dbContractMetric{}).Count(&n).Error; err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatalf("expected 2 metrics, got %v", n) - } -} - func TestWalletMetrics(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() @@ -553,3 +554,14 @@ func TestWalletMetrics(t *testing.T) { t.Fatalf("expected 1 metric, got %v", len(metrics)) } } + +func normaliseTimestamp(start time.Time, interval time.Duration, t unixTimeMS) unixTimeMS { + startMS := start.UnixMilli() + toNormaliseMS := time.Time(t).UnixMilli() + intervalMS := interval.Milliseconds() + if startMS > toNormaliseMS { + return unixTimeMS(start) + } + normalizedMS := (toNormaliseMS-startMS)/intervalMS*intervalMS + start.UnixMilli() + return unixTimeMS(time.UnixMilli(normalizedMS)) +} diff --git a/stores/migrations.go b/stores/migrations.go deleted file mode 100644 index 4ac6b755e..000000000 --- a/stores/migrations.go +++ /dev/null @@ -1,84 +0,0 @@ -package stores - -import ( - "errors" - "fmt" - - "github.com/go-gormigrate/gormigrate/v2" - "go.sia.tech/renterd/internal/utils" - "go.uber.org/zap" - "gorm.io/gorm" -) - -var ( - errRunV072 = errors.New("can't upgrade to >=v1.0.0 from your current version - please upgrade to v0.7.2 first (https://github.com/SiaFoundation/renterd/releases/tag/v0.7.2)") - errMySQLNoSuperPrivilege = errors.New("You do not have the SUPER 
privilege and binary logging is enabled") -) - -func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { - dbIdentifier := "main" - migrations := []*gormigrate.Migration{ - { - ID: "00001_init", - Migrate: func(tx *gorm.DB) error { return errRunV072 }, - }, - { - ID: "00001_object_metadata", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00001_object_metadata", logger) - }, - }, - { - ID: "00002_prune_slabs_trigger", - Migrate: func(tx *gorm.DB) error { - err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", logger) - if utils.IsErr(err, errMySQLNoSuperPrivilege) { - logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") - } - return err - }, - }, - { - ID: "00003_idx_objects_size", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00003_idx_objects_size", logger) - }, - }, - { - ID: "00004_prune_slabs_cascade", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", logger) - }, - }, - { - ID: "00005_zero_size_object_health", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00005_zero_size_object_health", logger) - }, - }, - { - ID: "00006_idx_objects_created_at", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00006_idx_objects_created_at", logger) - }, - }, - { - ID: "00007_host_checks", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00007_host_checks", logger) - }, - }, - } - - // Create migrator. - m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) - - // Set init function. - m.InitSchema(initSchema(dbIdentifier, logger)) - - // Perform migrations. 
- if err := m.Migrate(); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - return nil -} diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go deleted file mode 100644 index 25895c4f2..000000000 --- a/stores/migrations_metrics.go +++ /dev/null @@ -1,37 +0,0 @@ -package stores - -import ( - "fmt" - - "github.com/go-gormigrate/gormigrate/v2" - "go.uber.org/zap" - "gorm.io/gorm" -) - -func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { - dbIdentifier := "metrics" - migrations := []*gormigrate.Migration{ - { - ID: "00001_init", - Migrate: func(tx *gorm.DB) error { return errRunV072 }, - }, - { - ID: "00001_idx_contracts_fcid_timestamp", - Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00001_idx_contracts_fcid_timestamp", logger) - }, - }, - } - - // Create migrator. - m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) - - // Set init function. - m.InitSchema(initSchema(dbIdentifier, logger)) - - // Perform migrations. - if err := m.Migrate(); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - return nil -} diff --git a/stores/migrations_utils.go b/stores/migrations_utils.go deleted file mode 100644 index 0692b367f..000000000 --- a/stores/migrations_utils.go +++ /dev/null @@ -1,57 +0,0 @@ -package stores - -import ( - "fmt" - - gormigrate "github.com/go-gormigrate/gormigrate/v2" - "go.uber.org/zap" - "gorm.io/gorm" -) - -// initSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. 
-func initSchema(name string, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { - return func(tx *gorm.DB) error { - logger.Infof("initializing '%s' schema", name) - - // init schema - err := execSQLFile(tx, name, "schema") - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - - logger.Info("initialization complete") - return nil - } -} - -func performMigration(db *gorm.DB, kind, migration string, logger *zap.SugaredLogger) error { - logger.Infof("performing %s migration '%s'", kind, migration) - - // execute migration - err := execSQLFile(db, kind, fmt.Sprintf("migration_%s", migration)) - if err != nil { - return fmt.Errorf("migration '%s' failed: %w", migration, err) - } - - logger.Infof("migration '%s' complete", migration) - return nil -} - -func execSQLFile(db *gorm.DB, folder, filename string) error { - // build path - protocol := "mysql" - if isSQLite(db) { - protocol = "sqlite" - } - path := fmt.Sprintf("migrations/%s/%s/%s.sql", protocol, folder, filename) - - // read file - file, err := migrations.ReadFile(path) - if err != nil { - return err - } - - // execute it - return db.Exec(string(file)).Error -} diff --git a/stores/multipart.go b/stores/multipart.go index 5fde55d7b..95aad7104 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -2,313 +2,67 @@ package stores import ( "context" - "encoding/hex" - "errors" "fmt" - "math" "sort" - "strings" - "unicode/utf8" - "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" - "gorm.io/gorm" - "lukechampine.com/frand" + sql "go.sia.tech/renterd/stores/sql" ) -type ( - dbMultipartUpload struct { - Model - - Key secretKey - UploadID string `gorm:"uniqueIndex;NOT NULL;size:64"` - ObjectID string `gorm:"index:idx_multipart_uploads_object_id;NOT NULL"` - DBBucket dbBucket `gorm:"constraint:OnDelete:CASCADE"` // CASCADE to delete uploads when bucket is deleted - DBBucketID uint `gorm:"index:idx_multipart_uploads_db_bucket_id;NOT NULL"` - Parts 
[]dbMultipartPart // no CASCADE, parts are deleted via trigger - Metadata []dbObjectUserMetadata `gorm:"constraint:OnDelete:SET NULL"` // CASCADE to delete parts too - MimeType string `gorm:"index:idx_multipart_uploads_mime_type"` - } - - dbMultipartPart struct { - Model - Etag string `gorm:"index"` - PartNumber int `gorm:"index"` - Size uint64 - DBMultipartUploadID uint `gorm:"index;NOT NULL"` - Slabs []dbSlice // no CASCADE, slices are deleted via trigger - } -) - -func (dbMultipartUpload) TableName() string { - return "multipart_uploads" -} - -func (dbMultipartPart) TableName() string { - return "multipart_parts" -} - func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) { - // Marshal key - key, err := ec.MarshalBinary() + var uploadID string + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + uploadID, err = tx.InsertMultipartUpload(ctx, bucket, path, ec, mimeType, metadata) + return + }) if err != nil { return api.MultipartCreateResponse{}, err } - var uploadID string - err = s.retryTransaction(ctx, func(tx *gorm.DB) error { - // Get bucket id. - var bucketID uint - err := tx.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). 
- Take(&bucketID).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) - } else if err != nil { - return fmt.Errorf("failed to fetch bucket id: %w", err) - } - - // Create multipart upload - uploadIDEntropy := frand.Entropy256() - uploadID = hex.EncodeToString(uploadIDEntropy[:]) - multipartUpload := dbMultipartUpload{ - DBBucketID: bucketID, - Key: key, - UploadID: uploadID, - ObjectID: path, - MimeType: mimeType, - } - if err := tx.Create(&multipartUpload).Error; err != nil { - return fmt.Errorf("failed to create multipart upload: %w", err) - } - - // Create multipart metadata - if err := s.createMultipartMetadata(tx, multipartUpload.ID, metadata); err != nil { - return fmt.Errorf("failed to create multipart metadata: %w", err) - } - - return nil - }) return api.MultipartCreateResponse{ UploadID: uploadID, }, err } func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { - // collect all used contracts - usedContracts := make(map[types.PublicKey]map[types.FileContractID]struct{}) - for _, s := range slices { - for _, shard := range s.Shards { - for h, fcids := range shard.Contracts { - for _, fcid := range fcids { - if _, exists := usedContracts[h]; !exists { - usedContracts[h] = make(map[types.FileContractID]struct{}) - } - usedContracts[h][fcid] = struct{}{} - } - } - } - } - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - // Fetch contract set. - var cs dbContractSet - if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { - return fmt.Errorf("contract set %v not found: %w", contractSet, err) - } - // Fetch the used contracts. - contracts, err := fetchUsedContracts(tx, usedContracts) - if err != nil { - return fmt.Errorf("failed to fetch used contracts: %w", err) - } - // Find multipart upload. 
- var mu dbMultipartUpload - err = tx.Where("upload_id", uploadID). - Take(&mu). - Error - if err != nil { - return fmt.Errorf("failed to fetch multipart upload: %w", err) - } - // Delete a potentially existing part. - err = tx.Model(&dbMultipartPart{}). - Where("db_multipart_upload_id = ? AND part_number = ?", mu.ID, partNumber). - Delete(&dbMultipartPart{}). - Error - if err != nil { - return fmt.Errorf("failed to delete existing part: %w", err) - } - var size uint64 - for _, slice := range slices { - size += uint64(slice.Length) - } - // Create a new part. - part := dbMultipartPart{ - Etag: eTag, - PartNumber: partNumber, - DBMultipartUploadID: mu.ID, - Size: size, - } - err = tx.Create(&part).Error - if err != nil { - return fmt.Errorf("failed to create part: %w", err) - } - // Create the slices. - err = s.createSlices(tx, nil, &part.ID, cs.ID, contracts, slices) - if err != nil { - return fmt.Errorf("failed to create slices: %w", err) - } - return nil + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.AddMultipartPart(ctx, bucket, path, contractSet, eTag, uploadID, partNumber, slices) }) } func (s *SQLStore) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) { - err = s.retryTransaction(ctx, func(tx *gorm.DB) error { - var dbUpload dbMultipartUpload - err := tx. - Model(&dbMultipartUpload{}). - Joins("DBBucket"). - Where("upload_id", uploadID). - Take(&dbUpload). 
- Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrMultipartUploadNotFound - } else if err != nil { - return err - } - resp, err = dbUpload.convert() - return err + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + resp, err = tx.MultipartUpload(ctx, uploadID) + return }) return } func (s *SQLStore) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, limit int) (resp api.MultipartListUploadsResponse, err error) { - limitUsed := limit > 0 - if !limitUsed { - limit = math.MaxInt64 - } else { - limit++ - } - - // both markers must be used together - if (keyMarker == "" && uploadIDMarker != "") || (keyMarker != "" && uploadIDMarker == "") { - return api.MultipartListUploadsResponse{}, errors.New("both keyMarker and uploadIDMarker must be set or neither") - } - markerExpr := exprTRUE - if keyMarker != "" { - markerExpr = gorm.Expr("object_id > ? OR (object_id = ? AND upload_id > ?)", keyMarker, keyMarker, uploadIDMarker) - } - - prefixExpr := exprTRUE - if prefix != "" { - prefixExpr = gorm.Expr("SUBSTR(object_id, 1, ?) = ?", utf8.RuneCountInString(prefix), prefix) - } - - err = s.retryTransaction(ctx, func(tx *gorm.DB) error { - var dbUploads []dbMultipartUpload - err := tx. - Model(&dbMultipartUpload{}). - Joins("DBBucket"). - Where("DBBucket.name", bucket). - Where("?", markerExpr). - Where("?", prefixExpr). - Order("object_id ASC, upload_id ASC"). - Limit(limit). - Find(&dbUploads). - Error - if err != nil { - return err - } - // Check if there are more uploads beyond 'limit'. 
- if limitUsed && len(dbUploads) == int(limit) { - resp.HasMore = true - dbUploads = dbUploads[:len(dbUploads)-1] - resp.NextPathMarker = dbUploads[len(dbUploads)-1].ObjectID - resp.NextUploadIDMarker = dbUploads[len(dbUploads)-1].UploadID - } - for _, upload := range dbUploads { - u, err := upload.convert() - if err != nil { - return err - } - resp.Uploads = append(resp.Uploads, u) - } - return nil + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + resp, err = tx.MultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, limit) + return }) return } func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) { - limitUsed := limit > 0 - if !limitUsed { - limit = math.MaxInt64 - } else { - limit++ - } - - err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - var dbParts []dbMultipartPart - err := tx. - Model(&dbMultipartPart{}). - Joins("INNER JOIN multipart_uploads mus ON mus.id = multipart_parts.db_multipart_upload_id"). - Joins("INNER JOIN buckets b ON b.name = ? AND b.id = mus.db_bucket_id", bucket). - Where("mus.object_id = ? AND mus.upload_id = ? AND part_number > ?", object, uploadID, marker). - Order("part_number ASC"). - Limit(int(limit)). - Find(&dbParts). - Error - if err != nil { - return err - } - // Check if there are more parts beyond 'limit'. 
- if limitUsed && len(dbParts) == int(limit) { - resp.HasMore = true - dbParts = dbParts[:len(dbParts)-1] - resp.NextMarker = dbParts[len(dbParts)-1].PartNumber - } - for _, part := range dbParts { - resp.Parts = append(resp.Parts, api.MultipartListPartItem{ - PartNumber: part.PartNumber, - LastModified: api.TimeRFC3339(part.CreatedAt.UTC()), - ETag: part.Etag, - Size: int64(part.Size), - }) - } - return nil + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) (err error) { + resp, err = tx.MultipartUploadParts(ctx, bucket, object, uploadID, marker, limit) + return }) return resp, err } func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - // delete multipart upload optimistically - res := tx. - Where("upload_id", uploadID). - Where("object_id", path). - Where("db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", bucket). - Delete(&dbMultipartUpload{}) - if res.Error != nil { - return fmt.Errorf("failed to fetch multipart upload: %w", res.Error) - } - // if the upload wasn't found, find out why - if res.RowsAffected == 0 { - var mu dbMultipartUpload - err := tx.Where("upload_id = ?", uploadID). - Joins("DBBucket"). - Take(&mu). - Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrMultipartUploadNotFound - } else if err != nil { - return fmt.Errorf("failed to fetch multipart upload: %w", err) - } else if mu.ObjectID != path { - return fmt.Errorf("object id mismatch: %v != %v: %w", mu.ObjectID, path, api.ErrObjectNotFound) - } else if mu.DBBucket.Name != bucket { - return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) - } - return errors.New("failed to delete multipart upload for unknown reason") - } - // Prune the dangling slabs. 
- s.triggerSlabPruning() - return nil + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.AbortMultipartUpload(ctx, bucket, path, uploadID) }) + if err != nil { + return err + } + s.triggerSlabPruning() + return nil } func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path string, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) { @@ -323,161 +77,29 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str return api.MultipartCompleteResponse{}, fmt.Errorf("duplicate part number %v", parts[i].PartNumber) } } + var eTag string - err = s.retryTransaction(ctx, func(tx *gorm.DB) error { + var prune bool + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { // Delete potentially existing object. - _, err := s.deleteObject(tx, bucket, path) + prune, err = tx.DeleteObject(ctx, bucket, path) if err != nil { return fmt.Errorf("failed to delete object: %w", err) } - // Find multipart upload. - var mu dbMultipartUpload - err = tx.Where("upload_id = ?", uploadID). - Preload("Parts"). - Joins("DBBucket"). - Take(&mu). - Error + // Complete upload + eTag, err = tx.CompleteMultipartUpload(ctx, bucket, path, uploadID, parts, opts) if err != nil { - return fmt.Errorf("failed to fetch multipart upload: %w", err) - } - // Check object id. - if mu.ObjectID != path { - return fmt.Errorf("object id mismatch: %v != %v: %w", mu.ObjectID, path, api.ErrObjectNotFound) - } - - // Check bucket name. - if mu.DBBucket.Name != bucket { - return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) + return fmt.Errorf("failed to complete multipart upload: %w", err) } - - // Sort the parts. - sort.Slice(mu.Parts, func(i, j int) bool { - return mu.Parts[i].PartNumber < mu.Parts[j].PartNumber - }) - // Find relevant parts. 
- var dbParts []dbMultipartPart - var size uint64 - j := 0 - for _, part := range parts { - for { - if j >= len(mu.Parts) { - // ran out of parts in the database - return api.ErrPartNotFound - } else if mu.Parts[j].PartNumber > part.PartNumber { - // missing part - return api.ErrPartNotFound - } else if mu.Parts[j].PartNumber == part.PartNumber && mu.Parts[j].Etag == strings.Trim(part.ETag, "\"") { - // found a match - dbParts = append(dbParts, mu.Parts[j]) - size += mu.Parts[j].Size - j++ - break - } else { - // try next - j++ - } - } - } - - // Fetch all the slices in the right order. - var slices []dbSlice - h := types.NewHasher() - for _, part := range dbParts { - var partSlices []dbSlice - err = tx.Model(&dbSlice{}). - Joins("INNER JOIN multipart_parts mp ON mp.id = slices.db_multipart_part_id AND mp.id = ?", part.ID). - Joins("INNER JOIN multipart_uploads mus ON mus.id = mp.db_multipart_upload_id"). - Find(&partSlices). - Error - if err != nil { - return fmt.Errorf("failed to fetch slices: %w", err) - } - slices = append(slices, partSlices...) - if _, err = h.E.Write([]byte(part.Etag)); err != nil { - return fmt.Errorf("failed to hash etag: %w", err) - } - } - - // Compute ETag. - sum := h.Sum() - eTag = hex.EncodeToString(sum[:]) - - // Create the object. - obj := dbObject{ - DBBucketID: mu.DBBucketID, - ObjectID: path, - Key: mu.Key, - Size: int64(size), - MimeType: mu.MimeType, - Etag: eTag, - } - if err := tx.Create(&obj).Error; err != nil { - return fmt.Errorf("failed to create object: %w", err) - } - - // Assign the right object id and unassign the multipart upload. Also - // set the right object_index to make sure the slices are sorted - // correctly when retrieving the object later. - for i := range slices { - err = tx.Model(&dbSlice{}). - Where("id", slices[i].ID). 
- Updates(map[string]interface{}{ - "db_object_id": obj.ID, - "object_index": uint(i + 1), - "db_multipart_part_id": nil, - }).Error - if err != nil { - return fmt.Errorf("failed to update slice %v: %w", i, err) - } - } - - // Create new metadata. - if len(opts.Metadata) > 0 { - err = s.createUserMetadata(tx, obj.ID, opts.Metadata) - if err != nil { - return fmt.Errorf("failed to create metadata: %w", err) - } - } - - // Update user metadata. - if err := tx. - Model(&dbObjectUserMetadata{}). - Where("db_multipart_upload_id = ?", mu.ID). - Updates(map[string]interface{}{ - "db_object_id": obj.ID, - "db_multipart_upload_id": nil, - }).Error; err != nil { - return fmt.Errorf("failed to update user metadata: %w", err) - } - - // Delete the multipart upload. - if err := tx.Delete(&mu).Error; err != nil { - return fmt.Errorf("failed to delete multipart upload: %w", err) - } - - // Prune the slabs. - s.triggerSlabPruning() return nil }) if err != nil { return api.MultipartCompleteResponse{}, err + } else if prune { + s.triggerSlabPruning() } return api.MultipartCompleteResponse{ ETag: eTag, }, nil } - -func (u dbMultipartUpload) convert() (api.MultipartUpload, error) { - var key object.EncryptionKey - if err := key.UnmarshalBinary(u.Key); err != nil { - return api.MultipartUpload{}, fmt.Errorf("failed to unmarshal key: %w", err) - } - return api.MultipartUpload{ - Bucket: u.DBBucket.Name, - Key: key, - Path: u.ObjectID, - UploadID: u.UploadID, - CreatedAt: api.TimeRFC3339(u.CreatedAt.UTC()), - }, nil -} diff --git a/stores/multipart_test.go b/stores/multipart_test.go index 4bd202a51..762ea45a9 100644 --- a/stores/multipart_test.go +++ b/stores/multipart_test.go @@ -85,7 +85,6 @@ func TestMultipartUploadWithUploadPackingRegression(t *testing.T) { } // Complete the upload. Check that the number of slices stays the same. 
- ts := time.Now() var nSlicesBefore int64 var nSlicesAfter int64 if err := ss.db.Model(&dbSlice{}).Count(&nSlicesBefore).Error; err != nil { @@ -98,8 +97,6 @@ func TestMultipartUploadWithUploadPackingRegression(t *testing.T) { t.Fatal(err) } else if nSlicesBefore != nSlicesAfter { t.Fatalf("expected number of slices to stay the same, but got %v before and %v after", nSlicesBefore, nSlicesAfter) - } else if err := ss.waitForPruneLoop(ts); err != nil { - t.Fatal(err) } // Fetch the object. @@ -269,3 +266,44 @@ func TestMultipartUploads(t *testing.T) { t.Fatal("expected 3 iterations") } } + +func TestMultipartUploadEmptyObjects(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // create 2 multipart parts + resp1, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo1", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + resp2, err := ss.CreateMultipartUpload(context.Background(), api.DefaultBucketName, "/foo2", object.NoOpKey, testMimeType, testMetadata) + if err != nil { + t.Fatal(err) + } + + // complete uploads in reverse order + cmu1, err := ss.CompleteMultipartUpload(context.Background(), api.DefaultBucketName, "/foo2", resp2.UploadID, []api.MultipartCompletedPart{}, api.CompleteMultipartOptions{}) + if err != nil { + t.Fatal(err) + } + cmu2, err := ss.CompleteMultipartUpload(context.Background(), api.DefaultBucketName, "/foo1", resp1.UploadID, []api.MultipartCompletedPart{}, api.CompleteMultipartOptions{}) + if err != nil { + t.Fatal(err) + } + + foo1, err := ss.ObjectMetadata(context.Background(), api.DefaultBucketName, "/foo1") + if err != nil { + t.Fatal(err) + } else if foo1.ETag != cmu1.ETag { + t.Fatal("unexpected etag") + } + foo2, err := ss.ObjectMetadata(context.Background(), api.DefaultBucketName, "/foo2") + if err != nil { + t.Fatal(err) + } else if foo2.ETag != cmu2.ETag { + t.Fatal("unexpected etag") + } +} 
diff --git a/stores/settingsdb.go b/stores/settingsdb.go index f7ba1e82d..08d0d3faf 100644 --- a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -2,35 +2,26 @@ package stores import ( "context" - "errors" "fmt" - "go.sia.tech/renterd/api" - "gorm.io/gorm" - "gorm.io/gorm/clause" + sql "go.sia.tech/renterd/stores/sql" ) -type ( - dbSetting struct { - Model - - Key string `gorm:"unique;index;NOT NULL"` - Value setting `gorm:"NOT NULL"` - } -) - -// TableName implements the gorm.Tabler interface. -func (dbSetting) TableName() string { return "settings" } - // DeleteSetting implements the bus.SettingStore interface. func (s *SQLStore) DeleteSetting(ctx context.Context, key string) error { - // Delete from cache. s.settingsMu.Lock() - delete(s.settings, key) - s.settingsMu.Unlock() + defer s.settingsMu.Unlock() + + // delete from database first + if err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.DeleteSettings(ctx, key) + }); err != nil { + return err + } - // Delete from database. - return s.db.Where(&dbSetting{Key: key}).Delete(&dbSetting{}).Error + // delete from cache + delete(s.settings, key) + return nil } // Setting implements the bus.SettingStore interface. @@ -44,43 +35,41 @@ func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { } // Check database. - var entry dbSetting - err := s.db.Where(&dbSetting{Key: key}). - Take(&entry).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return "", fmt.Errorf("key '%s' err: %w", key, api.ErrSettingNotFound) - } else if err != nil { - return "", err + var err error + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + value, err = tx.Setting(ctx, key) + return err + }) + if err != nil { + return "", fmt.Errorf("failed to fetch setting from db: %w", err) } - s.settings[key] = string(entry.Value) - return string(entry.Value), nil + s.settings[key] = value + return value, nil } // Settings implements the bus.SettingStore interface. 
-func (s *SQLStore) Settings(ctx context.Context) ([]string, error) { - var keys []string - tx := s.db.Model(&dbSetting{}).Select("Key").Find(&keys) - return keys, tx.Error +func (s *SQLStore) Settings(ctx context.Context) (settings []string, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + settings, err = tx.Settings(ctx) + return err + }) + return } // UpdateSetting implements the bus.SettingStore interface. func (s *SQLStore) UpdateSetting(ctx context.Context, key, value string) error { - // Update db first. + // update db first s.settingsMu.Lock() defer s.settingsMu.Unlock() - err := s.db.Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "key"}}, - DoUpdates: clause.AssignmentColumns([]string{"value"}), - }).Create(&dbSetting{ - Key: key, - Value: setting(value), - }).Error + err := s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.UpdateSetting(ctx, key, value) + }) if err != nil { return err } - // Update cache second. + // update cache second s.settings[key] = value return nil } diff --git a/stores/slabbuffer.go b/stores/slabbuffer.go index 2d16c8e33..77e2574a3 100644 --- a/stores/slabbuffer.go +++ b/stores/slabbuffer.go @@ -16,7 +16,8 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" - "gorm.io/gorm" + sql "go.sia.tech/renterd/stores/sql" + "go.uber.org/zap" "lukechampine.com/frand" ) @@ -40,9 +41,11 @@ type SlabBuffer struct { type bufferGroupID [6]byte type SlabBufferManager struct { + alerts alerts.Alerter bufferedSlabCompletionThreshold int64 + db sql.Database dir string - s *SQLStore + logger *zap.SugaredLogger mu sync.Mutex completeBuffers map[bufferGroupID][]*SlabBuffer @@ -50,84 +53,67 @@ type SlabBufferManager struct { buffersByKey map[string]*SlabBuffer } -func newSlabBufferManager(sqlStore *SQLStore, slabBufferCompletionThreshold int64, partialSlabDir string) (*SlabBufferManager, error) { +func newSlabBufferManager(ctx context.Context, a 
alerts.Alerter, db sql.Database, logger *zap.SugaredLogger, slabBufferCompletionThreshold int64, partialSlabDir string) (*SlabBufferManager, error) { if slabBufferCompletionThreshold < 0 || slabBufferCompletionThreshold > 1<<22 { return nil, fmt.Errorf("invalid slabBufferCompletionThreshold %v", slabBufferCompletionThreshold) } // load existing buffers - var buffers []dbBufferedSlab - err := sqlStore.db. - Joins("DBSlab"). - Find(&buffers). - Error + buffers, orphans, err := db.LoadSlabBuffers(ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load slab buffers: %w", err) } + mgr := &SlabBufferManager{ + alerts: a, bufferedSlabCompletionThreshold: slabBufferCompletionThreshold, + db: db, dir: partialSlabDir, - s: sqlStore, - completeBuffers: make(map[bufferGroupID][]*SlabBuffer), - incompleteBuffers: make(map[bufferGroupID][]*SlabBuffer), - buffersByKey: make(map[string]*SlabBuffer), + logger: logger, + + completeBuffers: make(map[bufferGroupID][]*SlabBuffer), + incompleteBuffers: make(map[bufferGroupID][]*SlabBuffer), + buffersByKey: make(map[string]*SlabBuffer), } - for _, buffer := range buffers { - if buffer.DBSlab.ID == 0 { - // Buffer doesn't have a slab. We can delete it. - sqlStore.logger.Warn(fmt.Sprintf("buffer %v has no associated slab, deleting it", buffer.Filename)) - if err := sqlStore.db.Delete(&buffer).Error; err != nil { - return nil, fmt.Errorf("failed to delete buffer %v: %v", buffer.ID, err) - } - if err := os.RemoveAll(filepath.Join(partialSlabDir, buffer.Filename)); err != nil { - return nil, fmt.Errorf("failed to remove buffer file %v: %v", buffer.Filename, err) - } - continue - } - // Get the encryption key. - var ec object.EncryptionKey - if err := ec.UnmarshalBinary(buffer.DBSlab.Key); err != nil { - return nil, err + + for _, orphan := range orphans { + // Buffer doesn't have a slab. We can delete it. 
+ logger.Warn(fmt.Sprintf("buffer '%v' has no associated slab, deleting it", orphan)) + if err := os.RemoveAll(filepath.Join(partialSlabDir, orphan)); err != nil { + return nil, fmt.Errorf("failed to remove buffer file %v: %v", orphan, err) } + } + + for _, buffer := range buffers { // Open the file. file, err := os.OpenFile(filepath.Join(partialSlabDir, buffer.Filename), os.O_RDWR, 0600) if err != nil { - _ = sqlStore.alerts.RegisterAlert(sqlStore.shutdownCtx, alerts.Alert{ + _ = a.RegisterAlert(ctx, alerts.Alert{ ID: types.HashBytes([]byte(buffer.Filename)), Severity: alerts.SeverityCritical, Message: "failed to read buffer file on startup", Data: map[string]interface{}{ "filename": buffer.Filename, - "slabKey": ec, + "slabKey": buffer.Key, }, Timestamp: time.Now(), }) - sqlStore.logger.Errorf("failed to open buffer file %v for slab %v: %v", buffer.Filename, buffer.DBSlab.Key, err) + logger.Errorf("failed to open buffer file %v for slab %v: %v", buffer.Filename, buffer.Key, err) continue } - // Get the size of the buffer by looking at all slices using it - var size int64 - err = sqlStore.db.Model(&dbSlab{}). - Joins("INNER JOIN slices sli ON slabs.id = sli.db_slab_id"). - Select("COALESCE(MAX(offset+length), 0) as Size"). - Where("slabs.db_buffered_slab_id = ?", buffer.ID). - Scan(&size). - Error - if err != nil { - return nil, err - } + // Create the slab buffer. sb := &SlabBuffer{ - dbID: buffer.ID, + dbID: uint(buffer.ID), filename: buffer.Filename, - slabKey: ec, - maxSize: int64(bufferedSlabSize(buffer.DBSlab.MinShards)), + slabKey: buffer.Key, + maxSize: int64(bufferedSlabSize(buffer.MinShards)), file: file, - size: size, + size: buffer.Size, } // Add the buffer to the manager. 
- gid := bufferGID(buffer.DBSlab.MinShards, buffer.DBSlab.TotalShards, uint32(buffer.DBSlab.DBContractSetID)) - if size >= int64(sb.maxSize-slabBufferCompletionThreshold) { + gid := bufferGID(buffer.MinShards, buffer.TotalShards, uint32(buffer.ContractSetID)) + if sb.size >= int64(sb.maxSize-slabBufferCompletionThreshold) { mgr.completeBuffers[gid] = append(mgr.completeBuffers[gid], sb) } else { mgr.incompleteBuffers[gid] = append(mgr.incompleteBuffers[gid], sb) @@ -204,8 +190,8 @@ func (mgr *SlabBufferManager) AddPartialSlab(ctx context.Context, data []byte, m // If there is still data left, create a new buffer. if len(data) > 0 { var sb *SlabBuffer - err = mgr.s.retryTransaction(ctx, func(tx *gorm.DB) error { - sb, err = createSlabBuffer(tx, contractSet, mgr.dir, minShards, totalShards) + err = mgr.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + sb, err = createSlabBuffer(ctx, tx, contractSet, mgr.dir, minShards, totalShards) return err }) if err != nil { @@ -306,8 +292,10 @@ func (mgr *SlabBufferManager) SlabBuffers() (sbs []api.SlabBuffer) { } func (mgr *SlabBufferManager) SlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set uint, limit int) (slabs []api.PackedSlab, _ error) { + // Deep copy complete buffers. We don't want to block the manager while we + // perform disk I/O. mgr.mu.Lock() - buffers := mgr.completeBuffers[bufferGID(minShards, totalShards, uint32(set))] + buffers := append([]*SlabBuffer{}, mgr.completeBuffers[bufferGID(minShards, totalShards, uint32(set))]...) 
mgr.mu.Unlock() for _, buffer := range buffers { @@ -317,7 +305,7 @@ func (mgr *SlabBufferManager) SlabsForUpload(ctx context.Context, lockingDuratio data := make([]byte, buffer.size) _, err := buffer.file.ReadAt(data, 0) if err != nil { - mgr.s.alerts.RegisterAlert(ctx, alerts.Alert{ + mgr.alerts.RegisterAlert(ctx, alerts.Alert{ ID: types.HashBytes([]byte(buffer.filename)), Severity: alerts.SeverityCritical, Message: "failed to read data from buffer", @@ -327,7 +315,7 @@ func (mgr *SlabBufferManager) SlabsForUpload(ctx context.Context, lockingDuratio }, Timestamp: time.Now(), }) - mgr.s.logger.Error(ctx, fmt.Sprintf("failed to read buffer %v: %s", buffer.filename, err)) + mgr.logger.Error(ctx, fmt.Sprintf("failed to read buffer %v: %s", buffer.filename, err)) return nil, err } slabs = append(slabs, api.PackedSlab{ @@ -359,9 +347,9 @@ func (mgr *SlabBufferManager) RemoveBuffers(fileNames ...string) { // an error because the buffers are not meant to be used anymore // anyway. if err := buffers[i].file.Close(); err != nil { - mgr.s.logger.Errorf("failed to close buffer %v: %v", buffers[i].filename, err) + mgr.logger.Errorf("failed to close buffer %v: %v", buffers[i].filename, err) } else if err := os.RemoveAll(filepath.Join(mgr.dir, buffers[i].filename)); err != nil { - mgr.s.logger.Errorf("failed to remove buffer %v: %v", buffers[i].filename, err) + mgr.logger.Errorf("failed to remove buffer %v: %v", buffers[i].filename, err) } delete(mgr.buffersByKey, buffers[i].slabKey.String()) buffers[i] = buffers[len(buffers)-1] @@ -470,31 +458,21 @@ func bufferedSlabSize(minShards uint8) int { return int(rhpv2.SectorSize) * int(minShards) } -func createSlabBuffer(tx *gorm.DB, contractSetID uint, dir string, minShards, totalShards uint8) (*SlabBuffer, error) { - ec := object.GenerateEncryptionKey() - key, err := ec.MarshalBinary() - if err != nil { - return nil, err - } +func createSlabBuffer(ctx context.Context, tx sql.DatabaseTx, contractSetID uint, dir string, minShards, 
totalShards uint8) (*SlabBuffer, error) { // Create a new buffer and slab. fileName := bufferFilename(contractSetID, minShards, totalShards) file, err := os.Create(filepath.Join(dir, fileName)) if err != nil { return nil, err } - createdSlab := dbBufferedSlab{ - DBSlab: dbSlab{ - DBContractSetID: contractSetID, - Key: key, - MinShards: minShards, - TotalShards: totalShards, - }, - Filename: fileName, + + ec := object.GenerateEncryptionKey() + bufferedSlabID, err := tx.InsertBufferedSlab(ctx, fileName, int64(contractSetID), ec, minShards, totalShards) + if err != nil { + return nil, fmt.Errorf("failed to insert buffered slab: %w", err) } - err = tx.Create(&createdSlab). - Error return &SlabBuffer{ - dbID: createdSlab.ID, + dbID: uint(bufferedSlabID), filename: fileName, slabKey: ec, maxSize: int64(bufferedSlabSize(minShards)), diff --git a/stores/slabbuffer_test.go b/stores/slabbuffer_test.go index bb0a7601d..4425fcff1 100644 --- a/stores/slabbuffer_test.go +++ b/stores/slabbuffer_test.go @@ -13,7 +13,7 @@ func TestRecordAppendToCompletedBuffer(t *testing.T) { defer ss.Close() completionThreshold := int64(1000) - mgr, err := newSlabBufferManager(ss.SQLStore, completionThreshold, t.TempDir()) + mgr, err := newSlabBufferManager(context.Background(), ss.alerts, ss.bMain, ss.logger, completionThreshold, t.TempDir()) if err != nil { t.Fatal(err) } @@ -66,7 +66,7 @@ func TestMarkBufferCompleteTwice(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() - mgr, err := newSlabBufferManager(ss.SQLStore, 0, t.TempDir()) + mgr, err := newSlabBufferManager(context.Background(), ss.alerts, ss.bMain, ss.logger, 0, t.TempDir()) if err != nil { t.Fatal(err) } diff --git a/stores/sql.go b/stores/sql.go index c46dc7fe3..8cc86be62 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -2,9 +2,9 @@ package stores import ( "context" - "embed" "errors" "fmt" + "math" "os" "strings" "sync" @@ -13,28 +13,17 @@ import ( "go.sia.tech/core/types" 
"go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/stores/sql" + "go.sia.tech/renterd/stores/sql/mysql" + "go.sia.tech/renterd/stores/sql/sqlite" "go.sia.tech/siad/modules" "go.uber.org/zap" - "gorm.io/driver/mysql" - "gorm.io/driver/sqlite" + gmysql "gorm.io/driver/mysql" + gsqlite "gorm.io/driver/sqlite" "gorm.io/gorm" glogger "gorm.io/gorm/logger" ) -const ( - // maxSQLVars is the maximum number of variables in an sql query. This - // number matches the sqlite default of 32766 rounded down to the nearest - // 1000. This is also lower than the mysql default of 65535. - maxSQLVars = 32000 -) - -//go:embed all:migrations/* -var migrations embed.FS - -var ( - exprTRUE = gorm.Expr("TRUE") -) - type ( // Model defines the common fields of every table. Same as Model // but excludes soft deletion since it breaks cascading deletes. @@ -46,7 +35,7 @@ type ( // Config contains all params for creating a SQLStore Config struct { Conn gorm.Dialector - ConnMetrics gorm.Dialector + DBMetrics sql.MetricsDatabase Alerts alerts.Alerter PartialSlabDir string Migrate bool @@ -57,14 +46,17 @@ type ( Logger *zap.SugaredLogger GormLogger glogger.Interface RetryTransactionIntervals []time.Duration + LongQueryDuration time.Duration + LongTxDuration time.Duration } // SQLStore is a helper type for interacting with a SQL-based backend. 
SQLStore struct { - alerts alerts.Alerter - db *gorm.DB - dbMetrics *gorm.DB - logger *zap.SugaredLogger + alerts alerts.Alerter + db *gorm.DB + bMain sql.Database + bMetrics sql.MetricsDatabase + logger *zap.SugaredLogger slabBufferMgr *SlabBufferManager @@ -104,8 +96,6 @@ type ( wg sync.WaitGroup mu sync.Mutex - allowListCnt uint64 - blockListCnt uint64 lastPrunedAt time.Time closed bool @@ -128,7 +118,7 @@ type ( // cache: set to shared which is required for in-memory databases // _foreign_keys: enforce foreign_key relations func NewEphemeralSQLiteConnection(name string) gorm.Dialector { - return sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared&_foreign_keys=1", name)) + return gsqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared&_foreign_keys=1", name)) } // NewSQLiteConnection opens a sqlite db at the given path. @@ -141,27 +131,19 @@ func NewEphemeralSQLiteConnection(name string) gorm.Dialector { // should be made configurable and set to TRUNCATE or any of the other options. // For reference see https://github.com/mattn/go-sqlite3#connection-string. func NewSQLiteConnection(path string) gorm.Dialector { - return sqlite.Open(fmt.Sprintf("file:%s?_busy_timeout=30000&_foreign_keys=1&_journal_mode=WAL", path)) + return gsqlite.Open(fmt.Sprintf("file:%s?_busy_timeout=30000&_foreign_keys=1&_journal_mode=WAL&_secure_delete=false&_cache_size=65536", path)) } // NewMetricsSQLiteConnection opens a sqlite db at the given path similarly to // NewSQLiteConnection but with weaker consistency guarantees since it's // optimised for recording metrics. func NewMetricsSQLiteConnection(path string) gorm.Dialector { - return sqlite.Open(fmt.Sprintf("file:%s?_busy_timeout=30000&_foreign_keys=1&_journal_mode=WAL&_synchronous=NORMAL", path)) + return gsqlite.Open(fmt.Sprintf("file:%s?_busy_timeout=30000&_foreign_keys=1&_journal_mode=WAL&_synchronous=NORMAL", path)) } // NewMySQLConnection creates a connection to a MySQL database. 
func NewMySQLConnection(user, password, addr, dbName string) gorm.Dialector { - return mysql.Open(fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&multiStatements=true", user, password, addr, dbName)) -} - -func DBConfigFromEnv() (uri, user, password, dbName string) { - uri = os.Getenv("RENTERD_DB_URI") - user = os.Getenv("RENTERD_DB_USER") - password = os.Getenv("RENTERD_DB_PASSWORD") - dbName = os.Getenv("RENTERD_DB_NAME") - return + return gmysql.Open(fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&multiStatements=true", user, password, addr, dbName)) } // NewSQLStore uses a given Dialector to connect to a SQL database. NOTE: Only @@ -184,24 +166,29 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { if err != nil { return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to open SQL db") } - dbMetrics, err := gorm.Open(cfg.ConnMetrics, &gorm.Config{ - Logger: cfg.GormLogger, // custom logger - }) + l := cfg.Logger.Named("sql") + + sqlDB, err := db.DB() if err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to open metrics db") + return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to fetch db: %v", err) } - l := cfg.Logger.Named("sql") - // Print SQLite version - var dbName string - var dbVersion string - if isSQLite(db) { - err = db.Raw("select sqlite_version()").Scan(&dbVersion).Error - dbName = "SQLite" + // Print DB version + var dbMain sql.Database + dbMetrics := cfg.DBMetrics + var mainErr error + if cfg.Conn.Name() == "sqlite" { + dbMain, mainErr = sqlite.NewMainDatabase(sqlDB, l, cfg.LongQueryDuration, cfg.LongTxDuration) } else { - err = db.Raw("select version()").Scan(&dbVersion).Error - dbName = "MySQL" + dbMain, mainErr = mysql.NewMainDatabase(sqlDB, l, cfg.LongQueryDuration, cfg.LongTxDuration) + } + if mainErr != nil { + return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to create main database: %v", mainErr) } + + ctx, cancel := 
context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + dbName, dbVersion, err := dbMain.Version(ctx) if err != nil { return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to fetch db version: %v", err) } @@ -209,26 +196,15 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { // Perform migrations. if cfg.Migrate { - if err := performMigrations(db, l); err != nil { + if err := dbMain.Migrate(context.Background()); err != nil { return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to perform migrations: %v", err) - } - if err := performMetricsMigrations(dbMetrics, l); err != nil { + } else if err := dbMetrics.Migrate(context.Background()); err != nil { return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to perform migrations for metrics db: %v", err) } } // Get latest consensus change ID or init db. - ci, ccid, err := initConsensusInfo(db) - if err != nil { - return nil, modules.ConsensusChangeID{}, err - } - - // Check allowlist and blocklist counts - allowlistCnt, err := tableCount(db, &dbAllowlistEntry{}) - if err != nil { - return nil, modules.ConsensusChangeID{}, err - } - blocklistCnt, err := tableCount(db, &dbBlocklistEntry{}) + ci, ccid, err := initConsensusInfo(ctx, dbMain) if err != nil { return nil, modules.ConsensusChangeID{}, err } @@ -253,14 +229,14 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { shutdownCtx, shutdownCtxCancel := context.WithCancel(context.Background()) ss := &SQLStore{ alerts: cfg.Alerts, + ccid: ccid, db: db, - dbMetrics: dbMetrics, + bMain: dbMain, + bMetrics: dbMetrics, logger: l, knownContracts: isOurContract, lastSave: time.Now(), persistInterval: cfg.PersistInterval, - allowListCnt: uint64(allowlistCnt), - blockListCnt: uint64(blocklistCnt), settings: make(map[string]string), slabPruneSigChan: make(chan struct{}, 1), unappliedContractState: make(map[types.FileContractID]contractState), @@ -273,7 +249,7 @@ func NewSQLStore(cfg 
Config) (*SQLStore, modules.ConsensusChangeID, error) { walletAddress: cfg.WalletAddress, chainIndex: types.ChainIndex{ Height: ci.Height, - ID: types.BlockID(ci.BlockID), + ID: types.BlockID(ci.ID), }, lastPrunedAt: time.Now(), @@ -283,7 +259,7 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { shutdownCtxCancel: shutdownCtxCancel, } - ss.slabBufferMgr, err = newSlabBufferManager(ss, cfg.SlabBufferCompletionThreshold, cfg.PartialSlabDir) + ss.slabBufferMgr, err = newSlabBufferManager(shutdownCtx, cfg.Alerts, dbMain, l.Named("slabbuffers"), cfg.SlabBufferCompletionThreshold, cfg.PartialSlabDir) if err != nil { return nil, modules.ConsensusChangeID{}, err } @@ -295,21 +271,15 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { func isSQLite(db *gorm.DB) bool { switch db.Dialector.(type) { - case *sqlite.Dialector: + case *gsqlite.Dialector: return true - case *mysql.Dialector: + case *gmysql.Dialector: return false default: panic(fmt.Sprintf("unknown dialector: %t", db.Dialector)) } } -func (ss *SQLStore) hasAllowlist() bool { - ss.mu.Lock() - defer ss.mu.Unlock() - return ss.allowListCnt > 0 -} - func (s *SQLStore) initSlabPruning() error { // start pruning loop s.wg.Add(1) @@ -319,50 +289,10 @@ func (s *SQLStore) initSlabPruning() error { }() // prune once to guarantee consistency on startup - return s.retryTransaction(s.shutdownCtx, pruneSlabs) -} - -func (ss *SQLStore) updateHasAllowlist(err *error) { - if *err != nil { - return - } - - cnt, cErr := tableCount(ss.db, &dbAllowlistEntry{}) - if cErr != nil { - *err = cErr - return - } - - ss.mu.Lock() - ss.allowListCnt = uint64(cnt) - ss.mu.Unlock() -} - -func (ss *SQLStore) hasBlocklist() bool { - ss.mu.Lock() - defer ss.mu.Unlock() - return ss.blockListCnt > 0 -} - -func (ss *SQLStore) updateHasBlocklist(err *error) { - if *err != nil { - return - } - - cnt, cErr := tableCount(ss.db, &dbBlocklistEntry{}) - if cErr != nil { - *err = cErr - return - } - - 
ss.mu.Lock() - ss.blockListCnt = uint64(cnt) - ss.mu.Unlock() -} - -func tableCount(db *gorm.DB, model interface{}) (cnt int64, err error) { - err = db.Model(model).Count(&cnt).Error - return + return s.bMain.Transaction(s.shutdownCtx, func(tx sql.DatabaseTx) error { + _, err := tx.PruneSlabs(s.shutdownCtx, math.MaxInt64) + return err + }) } // Close closes the underlying database connection of the store. @@ -370,20 +300,11 @@ func (s *SQLStore) Close() error { s.shutdownCtxCancel() s.wg.Wait() - db, err := s.db.DB() + err := s.bMain.Close() if err != nil { return err } - dbMetrics, err := s.dbMetrics.DB() - if err != nil { - return err - } - - err = db.Close() - if err != nil { - return err - } - err = dbMetrics.Close() + err = s.bMetrics.Close() if err != nil { return err } @@ -546,9 +467,8 @@ func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) er abortRetry := func(err error) bool { if err == nil || errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) || errors.Is(err, gorm.ErrRecordNotFound) || - errors.Is(err, errInvalidNumberOfShards) || - errors.Is(err, errShardRootChanged) || errors.Is(err, api.ErrContractNotFound) || errors.Is(err, api.ErrObjectNotFound) || errors.Is(err, api.ErrObjectCorrupted) || @@ -590,55 +510,28 @@ func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) er return fmt.Errorf("retryTransaction failed: %w", err) } -func initConsensusInfo(db *gorm.DB) (dbConsensusInfo, modules.ConsensusChangeID, error) { - var ci dbConsensusInfo - if err := db. - Where(&dbConsensusInfo{Model: Model{ID: consensusInfoID}}). - Attrs(dbConsensusInfo{ - Model: Model{ID: consensusInfoID}, - CCID: modules.ConsensusChangeBeginning[:], - }). - FirstOrCreate(&ci). 
- Error; err != nil { - return dbConsensusInfo{}, modules.ConsensusChangeID{}, err - } - var ccid modules.ConsensusChangeID - copy(ccid[:], ci.CCID) - return ci, ccid, nil +func initConsensusInfo(ctx context.Context, db sql.Database) (ci types.ChainIndex, ccid modules.ConsensusChangeID, err error) { + err = db.Transaction(ctx, func(tx sql.DatabaseTx) error { + ci, ccid, err = tx.InitConsensusInfo(ctx) + return err + }) + return } func (s *SQLStore) ResetConsensusSubscription(ctx context.Context) error { - // empty tables and reinit consensus_infos - var ci dbConsensusInfo - err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - if err := s.db.Exec("DELETE FROM consensus_infos").Error; err != nil { - return err - } else if err := s.db.Exec("DELETE FROM siacoin_elements").Error; err != nil { - return err - } else if err := s.db.Exec("DELETE FROM transactions").Error; err != nil { - return err - } else if ci, _, err = initConsensusInfo(tx); err != nil { - return err - } - return nil + // reset db + var ci types.ChainIndex + var err error + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + ci, err = tx.ResetConsensusSubscription(ctx) + return err }) if err != nil { return err } // reset in-memory state. 
s.persistMu.Lock() - s.chainIndex = types.ChainIndex{ - Height: ci.Height, - ID: types.BlockID(ci.BlockID), - } + s.chainIndex = ci s.persistMu.Unlock() return nil } - -func sumDurations(durations []time.Duration) time.Duration { - var sum time.Duration - for _, d := range durations { - sum += d - } - return sum -} diff --git a/stores/sql/consts.go b/stores/sql/consts.go new file mode 100644 index 000000000..340935623 --- /dev/null +++ b/stores/sql/consts.go @@ -0,0 +1,52 @@ +package sql + +import ( + "strings" + + "go.sia.tech/renterd/api" +) + +type ContractState uint8 + +const ( + contractStateInvalid ContractState = iota + contractStatePending + contractStateActive + contractStateComplete + contractStateFailed +) + +func (s *ContractState) LoadString(state string) error { + switch strings.ToLower(state) { + case api.ContractStateInvalid: + *s = contractStateInvalid + case api.ContractStatePending: + *s = contractStatePending + case api.ContractStateActive: + *s = contractStateActive + case api.ContractStateComplete: + *s = contractStateComplete + case api.ContractStateFailed: + *s = contractStateFailed + default: + *s = contractStateInvalid + } + return nil +} + +func (s ContractState) String() string { + switch s { + case contractStateInvalid: + return api.ContractStateInvalid + case contractStatePending: + return api.ContractStatePending + case contractStateActive: + return api.ContractStateActive + case contractStateComplete: + return api.ContractStateComplete + case contractStateFailed: + return api.ContractStateFailed + default: + return api.ContractStateUnknown + } +} diff --git a/stores/sql/database.go b/stores/sql/database.go new file mode 100644 index 000000000..57f94efa4 --- /dev/null +++ b/stores/sql/database.go @@ -0,0 +1,372 @@ +package sql + +import ( + "context" + "io" + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/object" + "go.sia.tech/renterd/webhooks" + "go.sia.tech/siad/modules" +) + +// The database 
interfaces define all methods that a SQL database must implement +// to be used by the SQLStore. +type ( + Database interface { + io.Closer + + // LoadSlabBuffers loads the slab buffers from the database. + LoadSlabBuffers(ctx context.Context) ([]LoadedSlabBuffer, []string, error) + + // Migrate runs all missing migrations on the database. + Migrate(ctx context.Context) error + + // Transaction starts a new transaction. + Transaction(ctx context.Context, fn func(DatabaseTx) error) error + + // Version returns the database version and name. + Version(ctx context.Context) (string, string, error) + } + + DatabaseTx interface { + // AbortMultipartUpload aborts a multipart upload and deletes it from + // the database. + AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error + + // Accounts returns all accounts from the db. + Accounts(ctx context.Context) ([]api.Account, error) + + // AddMultipartPart adds a part to an unfinished multipart upload. + AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error + + // AddWebhook adds a new webhook to the database. If the webhook already + // exists, it is updated. + AddWebhook(ctx context.Context, wh webhooks.Webhook) error + + // AncestorContracts returns all ancestor contracts of the contract up + // until the given start height. + AncestorContracts(ctx context.Context, id types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) + + // ArchiveContract moves a contract from the regular contracts to the + // archived ones. + ArchiveContract(ctx context.Context, fcid types.FileContractID, reason string) error + + // Autopilot returns the autopilot with the given ID. Returns + // api.ErrAutopilotNotFound if the autopilot doesn't exist. + Autopilot(ctx context.Context, id string) (api.Autopilot, error) + + // Autopilots returns all autopilots. 
+ Autopilots(ctx context.Context) ([]api.Autopilot, error) + + // Bucket returns the bucket with the given name. If the bucket doesn't + // exist, it returns api.ErrBucketNotFound. + Bucket(ctx context.Context, bucket string) (api.Bucket, error) + + // CompleteMultipartUpload completes a multipart upload by combining the + // provided parts into an object in bucket 'bucket' with key 'key'. The + // parts need to be provided in ascending partNumber order without + // duplicates but can contain gaps. + CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (string, error) + + // ContractRoots returns the roots of the contract with the given ID. + ContractRoots(ctx context.Context, fcid types.FileContractID) ([]types.Hash256, error) + + // Contracts returns contract metadata for all active contracts. The + // opts argument can be used to filter the result. + Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) + + // ContractSets returns the names of all contract sets. + ContractSets(ctx context.Context) ([]string, error) + + // ContractSize returns the size of the contract with the given ID as + // well as the estimated number of bytes that can be pruned from it. + ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) + + // ContractSizes returns the sizes of all contracts in the database as + // well as the estimated number of bytes that can be pruned from them. + ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error) + + // CopyObject copies an object from one bucket and key to another. If + // source and destination are the same, only the metadata and mimeType + // are overwritten with the provided ones. 
+ CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) + + // CreateBucket creates a new bucket with the given name and policy. If + // the bucket already exists, api.ErrBucketExists is returned. + CreateBucket(ctx context.Context, bucket string, policy api.BucketPolicy) error + + // DeleteHostSector deletes all contract sector links that a host has + // with the given root incrementing the lost sector count in the + // process. If another contract with a different host exists that + // contains the root, latest_host is updated to that host. + DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) + + // DeleteSettings deletes the settings with the given key. + DeleteSettings(ctx context.Context, key string) error + + // DeleteWebhook deletes the webhook with the matching module, event and + // URL of the provided webhook. If the webhook doesn't exist, + // webhooks.ErrWebhookNotFound is returned. + DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error + + // InsertBufferedSlab inserts a buffered slab into the database. This + // includes the creation of a buffered slab as well as the corresponding + // regular slab it is linked to. It returns the ID of the buffered slab + // that was created. + InsertBufferedSlab(ctx context.Context, fileName string, contractSetID int64, ec object.EncryptionKey, minShards, totalShards uint8) (int64, error) + + // InsertMultipartUpload creates a new multipart upload and returns a + // unique upload ID. + InsertMultipartUpload(ctx context.Context, bucket, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) + + // InvalidateSlabHealthByFCID invalidates the health of all slabs that + // are associated with any of the provided contracts. 
+ InvalidateSlabHealthByFCID(ctx context.Context, fcids []types.FileContractID, limit int64) (int64, error) + + // DeleteBucket deletes a bucket. If the bucket isn't empty, it returns + // api.ErrBucketNotEmpty. If the bucket doesn't exist, it returns + // api.ErrBucketNotFound. + DeleteBucket(ctx context.Context, bucket string) error + + // DeleteObject deletes an object from the database and returns true if + // the requested object was actually deleted. + DeleteObject(ctx context.Context, bucket, key string) (bool, error) + + // DeleteObjects deletes a batch of objects starting with the given + // prefix and returns 'true' if any object was deleted. + DeleteObjects(ctx context.Context, bucket, prefix string, limit int64) (bool, error) + + // HostAllowlist returns the list of public keys of hosts on the + // allowlist. + HostAllowlist(ctx context.Context) ([]types.PublicKey, error) + + // HostBlocklist returns the list of host addresses on the blocklist. + HostBlocklist(ctx context.Context) ([]string, error) + + // InitConsensusInfo initializes the consensus info in the database or + // returns the latest one. + InitConsensusInfo(ctx context.Context) (types.ChainIndex, modules.ConsensusChangeID, error) + + // InsertObject inserts a new object into the database. + InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error + + // HostsForScanning returns a list of hosts to scan which haven't been + // scanned since at least maxLastScan. + HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) + + // ListBuckets returns a list of all buckets in the database. + ListBuckets(ctx context.Context) ([]api.Bucket, error) + + // ListObjects returns a list of objects from the given bucket. 
+ ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) + + // MakeDirsForPath creates all directories for a given object's path. + MakeDirsForPath(ctx context.Context, path string) (int64, error) + + // MultipartUpload returns the multipart upload with the given ID or + // api.ErrMultipartUploadNotFound if the upload doesn't exist. + MultipartUpload(ctx context.Context, uploadID string) (api.MultipartUpload, error) + + // MultipartUploadParts returns a list of all parts for a given + // multipart upload + MultipartUploadParts(ctx context.Context, bucket, key, uploadID string, marker int, limit int64) (api.MultipartListPartsResponse, error) + + // MultipartUploads returns a list of all multipart uploads. + MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, limit int) (api.MultipartListUploadsResponse, error) + + // ObjectsStats returns overall stats about stored objects + ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) + + // PruneEmptydirs prunes any directories that are empty. + PruneEmptydirs(ctx context.Context) error + + // PruneSlabs deletes slabs that are no longer referenced by any slice + // or slab buffer. + PruneSlabs(ctx context.Context, limit int64) (int64, error) + + // RecordHostScans records the results of host scans in the database + // such as recording the settings and price table of a host in case of + // success and updating the uptime and downtime of a host. + // NOTE: The price table is only updated if the known price table is + // expired since price tables from scans are not paid for and are + // therefore only useful for gouging checks. + RecordHostScans(ctx context.Context, scans []api.HostScan) error + + // RecordPriceTables records price tables for hosts in the database + // increasing the successful/failed interactions accordingly. 
+ RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error + + // RemoveContractSet removes + // the contract set with the given name from + // the database. + RemoveContractSet(ctx context.Context, contractSet string) error + + // RemoveOfflineHosts removes all hosts that have been offline for + // longer than maxDownTime and been scanned at least minRecentFailures + // times. The contracts of those hosts are also removed. + RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDownTime time.Duration) (int64, error) + + // RenameObject renames an object in the database from keyOld to keyNew + // and the new directory dirID. Returns api.ErrObjectExists if an + // object already exists at the target location or api.ErrObjectNotFound + // if the object at keyOld doesn't exist. If force is true, then instead + // of returning api.ErrObjectExists, the existing object will be + // deleted. + RenameObject(ctx context.Context, bucket, keyOld, keyNew string, dirID int64, force bool) error + + // RenameObjects renames all objects in the database with the given + // prefix to the new prefix. If 'force' is true, it will overwrite any + // existing objects with the new prefix. If no object can be renamed, + // `api.ErrObjectNotFound` is returned. If 'force' is false and an + // object already exists with the new prefix, `api.ErrObjectExists` is + // returned. + RenameObjects(ctx context.Context, bucket, prefixOld, prefixNew string, dirID int64, force bool) error + + // RenewedContract returns the metadata of the contract that was renewed + // from the specified contract or ErrContractNotFound otherwise. + RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (api.ContractMetadata, error) + + // ResetConsensusSubscription resets the consensus subscription in the + // database.
+ ResetConsensusSubscription(ctx context.Context) (types.ChainIndex, error) + + // ResetLostSectors resets the lost sector count for the given host. + ResetLostSectors(ctx context.Context, hk types.PublicKey) error + + // SaveAccounts saves the given accounts in the db, overwriting any + // existing ones and setting the clean shutdown flag. + SaveAccounts(ctx context.Context, accounts []api.Account) error + + // SearchHosts returns a list of hosts that match the provided filters + SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) + + // SetUncleanShutdown sets the clean shutdown flag on the accounts to + // 'false' and also marks them as requiring a resync. + SetUncleanShutdown(ctx context.Context) error + + // Setting returns the setting with the given key from the database. + Setting(ctx context.Context, key string) (string, error) + + // Settings returns all available settings from the database. + Settings(ctx context.Context) ([]string, error) + + // SlabBuffers returns the filenames and associated contract sets of all + // slab buffers. + SlabBuffers(ctx context.Context) (map[string]string, error) + + // UpdateAutopilot updates the autopilot with the provided one or + // creates a new one if it doesn't exist yet. + UpdateAutopilot(ctx context.Context, ap api.Autopilot) error + + // UpdateBucketPolicy updates the policy of the bucket with the provided + // one, fully overwriting the existing policy. 
+ UpdateBucketPolicy(ctx context.Context, bucket string, policy api.BucketPolicy) error + + // UpdateHostAllowlistEntries updates the allowlist in the database + UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error + + // UpdateHostBlocklistEntries updates the blocklist in the database + UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error + + // UpdateHostCheck updates the host check for the given host. + UpdateHostCheck(ctx context.Context, autopilot string, hk types.PublicKey, hc api.HostCheck) error + + // UpdateSetting updates the setting with the given key to the given + // value. + UpdateSetting(ctx context.Context, key, value string) error + + // UpdateSlab updates the slab in the database. That includes the following: + // - Optimistically set health to 100% + // - Invalidate health_valid_until + // - Update LatestHost for every shard + // The operation is not allowed to update the number of shards + // associated with a slab or the root/slabIndex of any shard. + UpdateSlab(ctx context.Context, s object.Slab, contractSet string, usedContracts []types.FileContractID) error + + // UpdateSlabHealth updates the health of up to 'limit' slabs in the + // database if their health is not valid anymore. A random interval + // between 'minValidity' and 'maxValidity' is used to determine the time + // the health of the updated slabs becomes invalid. + UpdateSlabHealth(ctx context.Context, limit int64, minValidity, maxValidity time.Duration) (int64, error) + + // Webhooks returns all registered webhooks. + Webhooks(ctx context.Context) ([]webhooks.Webhook, error) + } + + MetricsDatabase interface { + io.Closer + + // Migrate runs all missing migrations on the database. + Migrate(ctx context.Context) error + + // Transaction starts a new transaction. + Transaction(ctx context.Context, fn func(MetricsDatabaseTx) error) error + + // Version returns the database version and name.
+ Version(ctx context.Context) (string, string, error) + } + + MetricsDatabaseTx interface { + // ContractMetrics returns contract metrics for the given time range + // and options. + ContractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) ([]api.ContractMetric, error) + + // ContractPruneMetrics returns the contract prune metrics for the given + // time range and options. + ContractPruneMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) ([]api.ContractPruneMetric, error) + + // ContractSetChurnMetrics returns the contract set churn metrics for + // the given time range and options. + ContractSetChurnMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) ([]api.ContractSetChurnMetric, error) + + // ContractSetMetrics returns the contract set metrics for the given + // time range and options. + ContractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) ([]api.ContractSetMetric, error) + + // PerformanceMetrics returns performance metrics for the given time range + PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) + + // PruneMetrics deletes metrics of a certain type older than the given + // cutoff time. + PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error + + // RecordContractMetric records contract metrics. + RecordContractMetric(ctx context.Context, metrics ...api.ContractMetric) error + + // RecordContractPruneMetric records contract prune metrics. + RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error + + // RecordContractSetChurnMetric records contract set churn metrics. 
+ RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error + + // RecordContractSetMetric records contract set metrics. + RecordContractSetMetric(ctx context.Context, metrics ...api.ContractSetMetric) error + + // RecordPerformanceMetric records performance metrics. + RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error + + // RecordWalletMetric records wallet metrics. + RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error + + // WalletMetrics returns wallet metrics for the given time range + WalletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) ([]api.WalletMetric, error) + } + + LoadedSlabBuffer struct { + ID int64 + ContractSetID int64 + Filename string + Key object.EncryptionKey + MinShards uint8 + Size int64 + TotalShards uint8 + } + + UsedContract struct { + ID int64 + FCID FileContractID + RenewedFrom FileContractID + } +) diff --git a/stores/sql/main.go b/stores/sql/main.go new file mode 100644 index 000000000..b5ee9670d --- /dev/null +++ b/stores/sql/main.go @@ -0,0 +1,1903 @@ +package sql + +import ( + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "math" + "math/big" + "strings" + "time" + "unicode/utf8" + + dsql "database/sql" + + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/sql" + "go.sia.tech/renterd/object" + "go.sia.tech/renterd/webhooks" + "go.sia.tech/siad/modules" + "lukechampine.com/frand" +) + +const consensuInfoID = 1 + +var ErrNegativeOffset = errors.New("offset can not be negative") + +func AbortMultipartUpload(ctx context.Context, tx sql.Tx, bucket, key string, uploadID string) error { + res, err := tx.Exec(ctx, ` + DELETE + FROM multipart_uploads + WHERE object_id = ? AND upload_id = ? AND db_bucket_id = ( + SELECT id + FROM buckets + WHERE name = ? 
+ )`, key, uploadID, bucket) + if err != nil { + return fmt.Errorf("failed to delete multipart upload: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to fetch rows affected: %w", err) + } else if n > 0 { + return nil + } + + // find out why the upload wasn't deleted + var muKey, bucketName string + err = tx.QueryRow(ctx, "SELECT object_id, b.name FROM multipart_uploads mu INNER JOIN buckets b ON mu.db_bucket_id = b.id WHERE upload_id = ?", uploadID). + Scan(&muKey, &bucketName) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrMultipartUploadNotFound + } else if err != nil { + return fmt.Errorf("failed to fetch multipart upload: %w", err) + } else if muKey != key { + return fmt.Errorf("object id mismatch: %v != %v: %w", muKey, key, api.ErrObjectNotFound) + } else if bucketName != bucket { + return fmt.Errorf("bucket name mismatch: %v != %v: %w", bucketName, bucket, api.ErrBucketNotFound) + } + return errors.New("failed to delete multipart upload for unknown reason") +} + +func Accounts(ctx context.Context, tx sql.Tx) ([]api.Account, error) { + rows, err := tx.Query(ctx, "SELECT account_id, clean_shutdown, host, balance, drift, requires_sync FROM ephemeral_accounts") + if err != nil { + return nil, fmt.Errorf("failed to fetch accounts: %w", err) + } + defer rows.Close() + + var accounts []api.Account + for rows.Next() { + a := api.Account{Balance: new(big.Int), Drift: new(big.Int)} // init big.Int + if err := rows.Scan((*PublicKey)(&a.ID), &a.CleanShutdown, (*PublicKey)(&a.HostKey), (*BigInt)(a.Balance), (*BigInt)(a.Drift), &a.RequiresSync); err != nil { + return nil, fmt.Errorf("failed to scan account: %w", err) + } + accounts = append(accounts, a) + } + return accounts, nil +} + +func AncestorContracts(ctx context.Context, tx sql.Tx, fcid types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) { + rows, err := tx.Query(ctx, ` + WITH RECURSIVE ancestors AS + ( + SELECT * + FROM archived_contracts + 
WHERE renewed_to = ? + UNION ALL + SELECT archived_contracts.* + FROM ancestors, archived_contracts + WHERE archived_contracts.renewed_to = ancestors.fcid + ) + SELECT fcid, host, renewed_to, upload_spending, download_spending, fund_account_spending, delete_spending, + proof_height, revision_height, revision_number, size, start_height, state, window_start, window_end + FROM ancestors + WHERE start_height >= ? + `, FileContractID(fcid), startHeight) + if err != nil { + return nil, fmt.Errorf("failed to fetch ancestor contracts: %w", err) + } + defer rows.Close() + + var contracts []api.ArchivedContract + for rows.Next() { + var c api.ArchivedContract + var state ContractState + err := rows.Scan((*FileContractID)(&c.ID), (*PublicKey)(&c.HostKey), (*FileContractID)(&c.RenewedTo), + (*Currency)(&c.Spending.Uploads), (*Currency)(&c.Spending.Downloads), (*Currency)(&c.Spending.FundAccount), + (*Currency)(&c.Spending.Deletions), &c.ProofHeight, + &c.RevisionHeight, &c.RevisionNumber, &c.Size, &c.StartHeight, &state, &c.WindowStart, + &c.WindowEnd) + if err != nil { + return nil, fmt.Errorf("failed to scan contract: %w", err) + } + c.State = state.String() + contracts = append(contracts, c) + } + return contracts, nil +} + +func ArchiveContract(ctx context.Context, tx sql.Tx, fcid types.FileContractID, reason string) error { + _, err := tx.Exec(ctx, ` + INSERT INTO archived_contracts (created_at, fcid, renewed_from, contract_price, state, total_cost, + proof_height, revision_height, revision_number, size, start_height, window_start, window_end, + upload_spending, download_spending, fund_account_spending, delete_spending, list_spending, renewed_to, + host, reason) + SELECT ?, fcid, renewed_from, contract_price, state, total_cost, proof_height, revision_height, revision_number, + size, start_height, window_start, window_end, upload_spending, download_spending, fund_account_spending, + delete_spending, list_spending, NULL, h.public_key, ? 
+ FROM contracts c + INNER JOIN hosts h ON h.id = c.host_id + WHERE fcid = ? + `, time.Now(), reason, FileContractID(fcid)) + if err != nil { + return fmt.Errorf("failed to copy contract to archived_contracts: %w", err) + } + res, err := tx.Exec(ctx, "DELETE FROM contracts WHERE fcid = ?", FileContractID(fcid)) + if err != nil { + return fmt.Errorf("failed to delete contract from contracts: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to fetch rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("expected to delete 1 row, deleted %d", n) + } + return nil +} + +func Autopilot(ctx context.Context, tx sql.Tx, id string) (api.Autopilot, error) { + row := tx.QueryRow(ctx, "SELECT identifier, config, current_period FROM autopilots WHERE identifier = ?", id) + ap, err := scanAutopilot(row) + if errors.Is(err, dsql.ErrNoRows) { + return api.Autopilot{}, api.ErrAutopilotNotFound + } else if err != nil { + return api.Autopilot{}, fmt.Errorf("failed to fetch autopilot: %w", err) + } + return ap, nil +} + +func Autopilots(ctx context.Context, tx sql.Tx) ([]api.Autopilot, error) { + rows, err := tx.Query(ctx, "SELECT identifier, config, current_period FROM autopilots") + if err != nil { + return nil, fmt.Errorf("failed to fetch autopilots: %w", err) + } + defer rows.Close() + + var autopilots []api.Autopilot + for rows.Next() { + ap, err := scanAutopilot(rows) + if err != nil { + return nil, fmt.Errorf("failed to scan autopilot: %w", err) + } + autopilots = append(autopilots, ap) + } + return autopilots, nil +} + +func Bucket(ctx context.Context, tx sql.Tx, bucket string) (api.Bucket, error) { + b, err := scanBucket(tx.QueryRow(ctx, "SELECT created_at, name, COALESCE(policy, '{}') FROM buckets WHERE name = ?", bucket)) + if err != nil { + return api.Bucket{}, fmt.Errorf("failed to fetch bucket: %w", err) + } + return b, nil +} + +func ContractRoots(ctx context.Context, tx sql.Tx, fcid types.FileContractID) 
([]types.Hash256, error) { + rows, err := tx.Query(ctx, ` + SELECT s.root + FROM contract_sectors cs + INNER JOIN sectors s ON s.id = cs.db_sector_id + INNER JOIN contracts c ON c.id = cs.db_contract_id + WHERE c.fcid = ? + `, FileContractID(fcid)) + if err != nil { + return nil, fmt.Errorf("failed to fetch contract roots: %w", err) + } + defer rows.Close() + + var roots []types.Hash256 + for rows.Next() { + var root types.Hash256 + if err := rows.Scan((*Hash256)(&root)); err != nil { + return nil, fmt.Errorf("failed to scan root: %w", err) + } + roots = append(roots, root) + } + return roots, nil +} + +func Contracts(ctx context.Context, tx sql.Tx, opts api.ContractsOpts) ([]api.ContractMetadata, error) { + var whereExprs []string + var whereArgs []any + if opts.ContractSet != "" { + var contractSetID int64 + err := tx.QueryRow(ctx, "SELECT id FROM contract_sets WHERE contract_sets.name = ?", opts.ContractSet). + Scan(&contractSetID) + if errors.Is(err, dsql.ErrNoRows) { + return nil, api.ErrContractSetNotFound + } + whereExprs = append(whereExprs, "cs.id = ?") + whereArgs = append(whereArgs, contractSetID) + } + return QueryContracts(ctx, tx, whereExprs, whereArgs) +} + +func ContractSets(ctx context.Context, tx sql.Tx) ([]string, error) { + rows, err := tx.Query(ctx, "SELECT name FROM contract_sets") + if err != nil { + return nil, fmt.Errorf("failed to fetch contract sets: %w", err) + } + defer rows.Close() + + var sets []string + for rows.Next() { + var cs string + if err := rows.Scan(&cs); err != nil { + return nil, fmt.Errorf("failed to scan contract set: %w", err) + } + sets = append(sets, cs) + } + return sets, nil +} + +func ContractSizes(ctx context.Context, tx sql.Tx) (map[types.FileContractID]api.ContractSize, error) { + // the following query consists of two parts: + // 1. fetch all contracts that have no sectors and consider their size as + // prunable + // 2. 
fetch all contracts that have sectors and calculate the prunable size + // based on the number of sectors + rows, err := tx.Query(ctx, ` + SELECT c.fcid, c.size, c.size + FROM contracts c + WHERE NOT EXISTS ( + SELECT 1 + FROM contract_sectors cs + WHERE cs.db_contract_id = c.id + ) + UNION ALL + SELECT fcid, size, CASE WHEN contract_size > sector_size THEN contract_size - sector_size ELSE 0 END + FROM ( + SELECT c.fcid, c.size, MAX(c.size) as contract_size, COUNT(*) * ? as sector_size + FROM contracts c + INNER JOIN contract_sectors cs ON cs.db_contract_id = c.id + GROUP BY c.fcid + ) i + `, rhpv2.SectorSize) + if err != nil { + return nil, fmt.Errorf("failed to fetch contract sizes: %w", err) + } + + sizes := make(map[types.FileContractID]api.ContractSize) + for rows.Next() { + var fcid types.FileContractID + var cs api.ContractSize + if err := rows.Scan((*FileContractID)(&fcid), &cs.Size, &cs.Prunable); err != nil { + return nil, fmt.Errorf("failed to scan contract size: %w", err) + } + sizes[fcid] = cs + } + return sizes, nil +} + +func CopyObject(ctx context.Context, tx sql.Tx, srcBucket, dstBucket, srcKey, dstKey, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) { + // stmt to fetch bucket id + bucketIDStmt, err := tx.Prepare(ctx, "SELECT id FROM buckets WHERE name = ?") + if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to prepare statement to fetch bucket id: %w", err) + } + defer bucketIDStmt.Close() + + // fetch source bucket + var srcBID int64 + err = bucketIDStmt.QueryRow(ctx, srcBucket).Scan(&srcBID) + if errors.Is(err, dsql.ErrNoRows) { + return api.ObjectMetadata{}, fmt.Errorf("%w: source bucket", api.ErrBucketNotFound) + } else if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to fetch src bucket id: %w", err) + } + + // fetch src object id + var srcObjID int64 + err = tx.QueryRow(ctx, "SELECT id FROM objects WHERE db_bucket_id = ? AND object_id = ?", srcBID, srcKey). 
+ Scan(&srcObjID) + if errors.Is(err, dsql.ErrNoRows) { + return api.ObjectMetadata{}, api.ErrObjectNotFound + } else if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to fetch object id: %w", err) + } + + // helper to fetch metadata + fetchMetadata := func(objID int64) (om api.ObjectMetadata, err error) { + err = tx.QueryRow(ctx, "SELECT etag, health, created_at, object_id, size, mime_type FROM objects WHERE id = ?", objID). + Scan(&om.ETag, &om.Health, (*time.Time)(&om.ModTime), &om.Name, &om.Size, &om.MimeType) + if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to fetch new object: %w", err) + } + return om, nil + } + + if srcBucket == dstBucket && srcKey == dstKey { + // No copying is happening. We just update the metadata on the src + // object. + if _, err := tx.Exec(ctx, "UPDATE objects SET mime_type = ? WHERE id = ?", mimeType, srcObjID); err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to update mime type: %w", err) + } else if err := UpdateMetadata(ctx, tx, srcObjID, metadata); err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to update metadata: %w", err) + } + return fetchMetadata(srcObjID) + } + + // fetch destination bucket + var dstBID int64 + err = bucketIDStmt.QueryRow(ctx, dstBucket).Scan(&dstBID) + if errors.Is(err, dsql.ErrNoRows) { + return api.ObjectMetadata{}, fmt.Errorf("%w: destination bucket", api.ErrBucketNotFound) + } else if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to fetch dest bucket id: %w", err) + } + + // copy object + res, err := tx.Exec(ctx, `INSERT INTO objects (created_at, object_id, db_directory_id, db_bucket_id,`+"`key`"+`, size, mime_type, etag) + SELECT ?, ?, db_directory_id, ?, `+"`key`"+`, size, ?, etag + FROM objects + WHERE id = ?`, time.Now(), dstKey, dstBID, mimeType, srcObjID) + if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to insert object: %w", err) + } + dstObjID, err := res.LastInsertId() + if err != nil { + 
return api.ObjectMetadata{}, fmt.Errorf("failed to fetch object id: %w", err) + } + + // copy slices + _, err = tx.Exec(ctx, `INSERT INTO slices (created_at, db_object_id, object_index, db_slab_id, offset, length) + SELECT ?, ?, object_index, db_slab_id, offset, length + FROM slices + WHERE db_object_id = ?`, time.Now(), dstObjID, srcObjID) + if err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to copy slices: %w", err) + } + + // create metadata + if err := InsertMetadata(ctx, tx, &dstObjID, nil, metadata); err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to insert metadata: %w", err) + } + + // fetch copied object + return fetchMetadata(dstObjID) +} + +func DeleteHostSector(ctx context.Context, tx sql.Tx, hk types.PublicKey, root types.Hash256) (int, error) { + // update the latest_host field of the sector + _, err := tx.Exec(ctx, ` + UPDATE sectors + SET latest_host = COALESCE(( + SELECT * FROM ( + SELECT h.public_key + FROM hosts h + INNER JOIN contracts c ON c.host_id = h.id + INNER JOIN contract_sectors cs ON cs.db_contract_id = c.id + INNER JOIN sectors s ON s.id = cs.db_sector_id + WHERE s.root = ? AND h.public_key != ? + LIMIT 1 + ) AS _ + ), ?) + WHERE root = ? AND latest_host = ? + `, Hash256(root), PublicKey(hk), PublicKey{}, Hash256(root), PublicKey(hk)) + if err != nil { + return 0, fmt.Errorf("failed to update sector: %w", err) + } + + // remove potential links between the host's contracts and the sector + res, err := tx.Exec(ctx, ` + DELETE FROM contract_sectors + WHERE db_sector_id = ( + SELECT s.id + FROM sectors s + WHERE root = ? + ) AND db_contract_id IN ( + SELECT c.id + FROM contracts c + INNER JOIN hosts h ON h.id = c.host_id + WHERE h.public_key = ? 
+ ) + `, Hash256(root), PublicKey(hk)) + if err != nil { + return 0, fmt.Errorf("failed to delete contract sectors: %w", err) + } + deletedSectors, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("failed to check number of deleted contract sectors: %w", err) + } else if deletedSectors == 0 { + return 0, nil // nothing to do + } + + // invalidate the health of related slabs + _, err = tx.Exec(ctx, ` + UPDATE slabs + SET health_valid_until = 0 + WHERE id IN ( + SELECT db_slab_id + FROM sectors + WHERE root = ? + ) + `, Hash256(root)) + if err != nil { + return 0, fmt.Errorf("failed to invalidate slab health: %w", err) + } + + // increment host's lost sector count + _, err = tx.Exec(ctx, ` + UPDATE hosts + SET lost_sectors = lost_sectors + ? + WHERE public_key = ? + `, deletedSectors, PublicKey(hk)) + if err != nil { + return 0, fmt.Errorf("failed to update lost sectors: %w", err) + } + return int(deletedSectors), nil +} + +func DeleteSettings(ctx context.Context, tx sql.Tx, key string) error { + if _, err := tx.Exec(ctx, "DELETE FROM settings WHERE `key` = ?", key); err != nil { + return fmt.Errorf("failed to delete setting '%s': %w", key, err) + } + return nil +} + +func DeleteWebhook(ctx context.Context, tx sql.Tx, wh webhooks.Webhook) error { + res, err := tx.Exec(ctx, "DELETE FROM webhooks WHERE module = ? AND event = ? 
AND url = ?", wh.Module, wh.Event, wh.URL) + if err != nil { + return fmt.Errorf("failed to delete webhook: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to check rows affected: %w", err) + } else if n == 0 { + return webhooks.ErrWebhookNotFound + } + return nil +} + +func HostAllowlist(ctx context.Context, tx sql.Tx) ([]types.PublicKey, error) { + rows, err := tx.Query(ctx, "SELECT entry FROM host_allowlist_entries") + if err != nil { + return nil, fmt.Errorf("failed to fetch host allowlist: %w", err) + } + defer rows.Close() + + var allowlist []types.PublicKey + for rows.Next() { + var pk PublicKey + if err := rows.Scan(&pk); err != nil { + return nil, fmt.Errorf("failed to scan public key: %w", err) + } + allowlist = append(allowlist, types.PublicKey(pk)) + } + return allowlist, nil +} + +func HostBlocklist(ctx context.Context, tx sql.Tx) ([]string, error) { + rows, err := tx.Query(ctx, "SELECT entry FROM host_blocklist_entries") + if err != nil { + return nil, fmt.Errorf("failed to fetch host blocklist: %w", err) + } + defer rows.Close() + + var blocklist []string + for rows.Next() { + var entry string + if err := rows.Scan(&entry); err != nil { + return nil, fmt.Errorf("failed to scan blocklist entry: %w", err) + } + blocklist = append(blocklist, entry) + } + return blocklist, nil +} + +func HostsForScanning(ctx context.Context, tx sql.Tx, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { + if offset < 0 { + return nil, ErrNegativeOffset + } else if limit == -1 { + limit = math.MaxInt64 + } + + rows, err := tx.Query(ctx, "SELECT public_key, net_address FROM hosts WHERE last_scan < ? LIMIT ? 
OFFSET ?", + maxLastScan.UnixNano(), limit, offset) + if err != nil { + return nil, fmt.Errorf("failed to fetch hosts for scanning: %w", err) + } + defer rows.Close() + + var hosts []api.HostAddress + for rows.Next() { + var ha api.HostAddress + if err := rows.Scan((*PublicKey)(&ha.PublicKey), &ha.NetAddress); err != nil { + return nil, fmt.Errorf("failed to scan host row: %w", err) + } + hosts = append(hosts, ha) + } + return hosts, nil +} + +func InsertBufferedSlab(ctx context.Context, tx sql.Tx, fileName string, contractSetID int64, ec object.EncryptionKey, minShards, totalShards uint8) (int64, error) { + // insert buffered slab + res, err := tx.Exec(ctx, `INSERT INTO buffered_slabs (created_at, filename) VALUES (?, ?)`, + time.Now(), fileName) + if err != nil { + return 0, fmt.Errorf("failed to insert buffered slab: %w", err) + } + bufferedSlabID, err := res.LastInsertId() + if err != nil { + return 0, fmt.Errorf("failed to fetch buffered slab id: %w", err) + } + + key, err := ec.MarshalBinary() + if err != nil { + return 0, err + } + _, err = tx.Exec(ctx, ` + INSERT INTO slabs (created_at, db_contract_set_id, db_buffered_slab_id, `+"`key`"+`, min_shards, total_shards) + VALUES (?, ?, ?, ?, ?, ?)`, + time.Now(), contractSetID, bufferedSlabID, SecretKey(key), minShards, totalShards) + if err != nil { + return 0, fmt.Errorf("failed to insert slab: %w", err) + } + return bufferedSlabID, nil +} + +func InsertMultipartUpload(ctx context.Context, tx sql.Tx, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) { + // fetch bucket id + var bucketID int64 + err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE buckets.name = ?", bucket). 
+ Scan(&bucketID) + if errors.Is(err, dsql.ErrNoRows) { + return "", fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) + } else if err != nil { + return "", fmt.Errorf("failed to fetch bucket id: %w", err) + } + + // marshal key + ecBytes, err := ec.MarshalBinary() + if err != nil { + return "", err + } + + // insert multipart upload + uploadIDEntropy := frand.Entropy256() + uploadID := hex.EncodeToString(uploadIDEntropy[:]) + var muID int64 + res, err := tx.Exec(ctx, ` + INSERT INTO multipart_uploads (created_at, `+"`key`"+`, upload_id, object_id, db_bucket_id, mime_type) + VALUES (?, ?, ?, ?, ?, ?) + `, time.Now(), SecretKey(ecBytes), uploadID, key, bucketID, mimeType) + if err != nil { + return "", fmt.Errorf("failed to create multipart upload: %w", err) + } else if muID, err = res.LastInsertId(); err != nil { + return "", fmt.Errorf("failed to fetch multipart upload id: %w", err) + } + + // insert metadata + if err := InsertMetadata(ctx, tx, nil, &muID, metadata); err != nil { + return "", fmt.Errorf("failed to insert multipart metadata: %w", err) + } + return uploadID, nil +} + +func InsertObject(ctx context.Context, tx sql.Tx, key string, dirID, bucketID, size int64, ec []byte, mimeType, eTag string) (int64, error) { + res, err := tx.Exec(ctx, `INSERT INTO objects (created_at, object_id, db_directory_id, db_bucket_id, `+"`key`"+`, size, mime_type, etag) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + time.Now(), + key, + dirID, + bucketID, + SecretKey(ec), + size, + mimeType, + eTag) + if err != nil { + return 0, err + } + return res.LastInsertId() +} + +func LoadSlabBuffers(ctx context.Context, db *sql.DB) (bufferedSlabs []LoadedSlabBuffer, orphanedBuffers []string, err error) { + err = db.Transaction(ctx, func(tx sql.Tx) error { + // collect all buffers + rows, err := db.Query(ctx, ` + SELECT bs.id, bs.filename, sla.db_contract_set_id, sla.key, sla.min_shards, sla.total_shards + FROM buffered_slabs bs + INNER JOIN slabs sla ON 
sla.db_buffered_slab_id = bs.id + `) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var bs LoadedSlabBuffer + var sk SecretKey + if err := rows.Scan(&bs.ID, &bs.Filename, &bs.ContractSetID, &sk, &bs.MinShards, &bs.TotalShards); err != nil { + return fmt.Errorf("failed to scan buffered slab: %w", err) + } else if err := bs.Key.UnmarshalBinary(sk[:]); err != nil { + return fmt.Errorf("failed to unmarshal secret key: %w", err) + } + bufferedSlabs = append(bufferedSlabs, bs) + } + + // fill in sizes + for i := range bufferedSlabs { + err = tx.QueryRow(ctx, ` + SELECT COALESCE(MAX(offset+length), 0) + FROM slabs sla + INNER JOIN slices sli ON sla.id = sli.db_slab_id + WHERE sla.db_buffered_slab_id = ? + `, bufferedSlabs[i].ID).Scan(&bufferedSlabs[i].Size) + if err != nil { + return fmt.Errorf("failed to fetch buffered slab size: %w", err) + } + } + + // find orphaned buffers and delete them + rows, err = tx.Query(ctx, ` + SELECT bs.id, bs.filename + FROM buffered_slabs bs + LEFT JOIN slabs ON slabs.db_buffered_slab_id = bs.id + WHERE slabs.id IS NULL + `) + if err != nil { + return fmt.Errorf("failed to fetch orphaned buffers: %w", err) + } + var toDelete []int64 + for rows.Next() { + var id int64 + var filename string + if err := rows.Scan(&id, &filename); err != nil { + return fmt.Errorf("failed to scan orphaned buffer: %w", err) + } + orphanedBuffers = append(orphanedBuffers, filename) + toDelete = append(toDelete, id) + } + for _, id := range toDelete { + if _, err := tx.Exec(ctx, "DELETE FROM buffered_slabs WHERE id = ?", id); err != nil { + return fmt.Errorf("failed to delete orphaned buffer: %w", err) + } + } + return nil + }) + return +} + +func UpdateMetadata(ctx context.Context, tx sql.Tx, objID int64, md api.ObjectUserMetadata) error { + if err := DeleteMetadata(ctx, tx, objID); err != nil { + return err + } else if err := InsertMetadata(ctx, tx, &objID, nil, md); err != nil { + return err + } + return nil +} + +func 
DeleteMetadata(ctx context.Context, tx sql.Tx, objID int64) error { + _, err := tx.Exec(ctx, "DELETE FROM object_user_metadata WHERE db_object_id = ?", objID) + return err +} + +func InsertMetadata(ctx context.Context, tx sql.Tx, objID, muID *int64, md api.ObjectUserMetadata) error { + if len(md) == 0 { + return nil + } else if (objID == nil) == (muID == nil) { + return errors.New("either objID or muID must be set") + } + insertMetadataStmt, err := tx.Prepare(ctx, "INSERT INTO object_user_metadata (created_at, db_object_id, db_multipart_upload_id, `key`, value) VALUES (?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare statement to insert object metadata: %w", err) + } + defer insertMetadataStmt.Close() + + for k, v := range md { + if _, err := insertMetadataStmt.Exec(ctx, time.Now(), objID, muID, k, v); err != nil { + return fmt.Errorf("failed to insert object metadata: %w", err) + } + } + return nil +} + +func ContractSize(ctx context.Context, tx sql.Tx, id types.FileContractID) (api.ContractSize, error) { + var contractID, size uint64 + if err := tx.QueryRow(ctx, "SELECT id, size FROM contracts WHERE fcid = ?", FileContractID(id)). + Scan(&contractID, &size); errors.Is(err, dsql.ErrNoRows) { + return api.ContractSize{}, api.ErrContractNotFound + } else if err != nil { + return api.ContractSize{}, err + } + + var nSectors uint64 + if err := tx.QueryRow(ctx, "SELECT COUNT(*) FROM contract_sectors WHERE db_contract_id = ?", contractID). 
+ Scan(&nSectors); err != nil { + return api.ContractSize{}, err + } + sectorsSize := nSectors * rhpv2.SectorSize + + var prunable uint64 + if size > sectorsSize { + prunable = size - sectorsSize + } + return api.ContractSize{ + Size: size, + Prunable: prunable, + }, nil +} + +func DeleteBucket(ctx context.Context, tx sql.Tx, bucket string) error { + var id int64 + err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE name = ?", bucket).Scan(&id) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrBucketNotFound + } else if err != nil { + return fmt.Errorf("failed to fetch bucket id: %w", err) + } + var empty bool + err = tx.QueryRow(ctx, "SELECT NOT EXISTS(SELECT 1 FROM objects WHERE db_bucket_id = ?)", id).Scan(&empty) + if err != nil { + return fmt.Errorf("failed to check if bucket is empty: %w", err) + } else if !empty { + return api.ErrBucketNotEmpty + } + _, err = tx.Exec(ctx, "DELETE FROM buckets WHERE id = ?", id) + if err != nil { + return fmt.Errorf("failed to delete bucket: %w", err) + } + return nil +} + +func FetchUsedContracts(ctx context.Context, tx sql.Tx, fcids []types.FileContractID) (map[types.FileContractID]UsedContract, error) { + if len(fcids) == 0 { + return make(map[types.FileContractID]UsedContract), nil + } + + // flatten map to get all used contract ids + usedFCIDs := make([]FileContractID, 0, len(fcids)) + for _, fcid := range fcids { + usedFCIDs = append(usedFCIDs, FileContractID(fcid)) + } + + placeholders := make([]string, len(usedFCIDs)) + for i := range usedFCIDs { + placeholders[i] = "?" + } + placeholdersStr := strings.Join(placeholders, ", ") + + args := make([]interface{}, len(usedFCIDs)*2) + for i := range args { + args[i] = usedFCIDs[i%len(usedFCIDs)] + } + + // fetch all contracts, take into account renewals + rows, err := tx.Query(ctx, fmt.Sprintf(`SELECT id, fcid, renewed_from + FROM contracts + WHERE contracts.fcid IN (%s) OR renewed_from IN (%s) + `, placeholdersStr, placeholdersStr), args...) 
+ if err != nil { + return nil, fmt.Errorf("failed to fetch used contracts: %w", err) + } + defer rows.Close() + + var contracts []UsedContract + for rows.Next() { + var c UsedContract + if err := rows.Scan(&c.ID, &c.FCID, &c.RenewedFrom); err != nil { + return nil, fmt.Errorf("failed to scan used contract: %w", err) + } + contracts = append(contracts, c) + } + + fcidMap := make(map[types.FileContractID]struct{}, len(fcids)) + for _, fcid := range fcids { + fcidMap[fcid] = struct{}{} + } + + // build map of used contracts + usedContracts := make(map[types.FileContractID]UsedContract, len(contracts)) + for _, c := range contracts { + if _, used := fcidMap[types.FileContractID(c.FCID)]; used { + usedContracts[types.FileContractID(c.FCID)] = c + } + if _, used := fcidMap[types.FileContractID(c.RenewedFrom)]; used { + usedContracts[types.FileContractID(c.RenewedFrom)] = c + } + } + return usedContracts, nil +} + +func PrepareSlabHealth(ctx context.Context, tx sql.Tx, limit int64, now time.Time) error { + _, err := tx.Exec(ctx, "DROP TABLE IF EXISTS slabs_health") + if err != nil { + return fmt.Errorf("failed to drop temporary table: %w", err) + } + _, err = tx.Exec(ctx, ` + CREATE TEMPORARY TABLE slabs_health AS + SELECT slabs.id as id, CASE WHEN (slabs.min_shards = slabs.total_shards) + THEN + CASE WHEN (COUNT(DISTINCT(CASE WHEN cs.name IS NULL THEN NULL ELSE c.host_id END)) < slabs.min_shards) + THEN -1 + ELSE 1 + END + ELSE (CAST(COUNT(DISTINCT(CASE WHEN cs.name IS NULL THEN NULL ELSE c.host_id END)) AS FLOAT) - CAST(slabs.min_shards AS FLOAT)) / Cast(slabs.total_shards - slabs.min_shards AS FLOAT) + END as health + FROM slabs + INNER JOIN sectors s ON s.db_slab_id = slabs.id + LEFT JOIN contract_sectors se ON s.id = se.db_sector_id + LEFT JOIN contracts c ON se.db_contract_id = c.id + LEFT JOIN contract_set_contracts csc ON csc.db_contract_id = c.id AND csc.db_contract_set_id = slabs.db_contract_set_id + LEFT JOIN contract_sets cs ON cs.id = csc.db_contract_set_id 
+ WHERE slabs.health_valid_until <= ? + GROUP BY slabs.id + LIMIT ? + `, now.Unix(), limit) + if err != nil { + return fmt.Errorf("failed to create temporary table: %w", err) + } + if _, err := tx.Exec(ctx, "CREATE INDEX slabs_health_id ON slabs_health (id)"); err != nil { + return fmt.Errorf("failed to create index on temporary table: %w", err) + } + return err +} + +func ListBuckets(ctx context.Context, tx sql.Tx) ([]api.Bucket, error) { + rows, err := tx.Query(ctx, "SELECT created_at, name, COALESCE(policy, '{}') FROM buckets") + if err != nil { + return nil, fmt.Errorf("failed to fetch buckets: %w", err) + } + defer rows.Close() + + var buckets []api.Bucket + for rows.Next() { + bucket, err := scanBucket(rows) + if err != nil { + return nil, fmt.Errorf("failed to scan bucket: %w", err) + } + buckets = append(buckets, bucket) + } + return buckets, nil +} + +func MultipartUpload(ctx context.Context, tx sql.Tx, uploadID string) (api.MultipartUpload, error) { + resp, err := scanMultipartUpload(tx.QueryRow(ctx, "SELECT b.name, mu.key, mu.object_id, mu.upload_id, mu.created_at FROM multipart_uploads mu INNER JOIN buckets b ON b.id = mu.db_bucket_id WHERE mu.upload_id = ?", uploadID)) + if err != nil { + return api.MultipartUpload{}, fmt.Errorf("failed to fetch multipart upload: %w", err) + } + return resp, nil +} + +func MultipartUploadParts(ctx context.Context, tx sql.Tx, bucket, key, uploadID string, marker int, limit int64) (api.MultipartListPartsResponse, error) { + limitExpr := "" + limitUsed := limit > 0 + if limitUsed { + limitExpr = fmt.Sprintf("LIMIT %d", limit+1) + } + + rows, err := tx.Query(ctx, fmt.Sprintf(` + SELECT mp.part_number, mp.created_at, mp.etag, mp.size + FROM multipart_parts mp + INNER JOIN multipart_uploads mus ON mus.id = mp.db_multipart_upload_id + INNER JOIN buckets b ON b.id = mus.db_bucket_id + WHERE mus.object_id = ? AND b.name = ? AND mus.upload_id = ? AND part_number > ? 
// MultipartUploads lists in-progress multipart uploads, optionally filtered
// by bucket and object-key prefix. Pagination works via a (keyMarker,
// uploadIDMarker) pair which must be provided together; a positive limit caps
// the number of returned uploads and populates the HasMore/Next* fields of
// the response.
func MultipartUploads(ctx context.Context, tx sql.Tx, bucket, prefix, keyMarker, uploadIDMarker string, limit int) (api.MultipartListUploadsResponse, error) {
	// both markers must be used together
	if (keyMarker == "" && uploadIDMarker != "") || (keyMarker != "" && uploadIDMarker == "") {
		return api.MultipartListUploadsResponse{}, errors.New("both keyMarker and uploadIDMarker must be set or neither")
	}

	// prepare 'limit' expression - fetch one extra row to detect 'HasMore'
	limitExpr := ""
	limitUsed := limit > 0
	if limitUsed {
		limitExpr = fmt.Sprintf("LIMIT %d", limit+1)
	}

	// prepare 'where' expression
	var whereExprs []string
	var args []any
	if keyMarker != "" {
		// NOTE(review): this OR expression is joined to the other filters
		// with " AND " below without outer parentheses; SQL's AND binds
		// tighter than OR, which may change the intended precedence when a
		// prefix or bucket filter is also present - verify.
		whereExprs = append(whereExprs, "object_id > ? OR (object_id = ? AND upload_id > ?)")
		args = append(args, keyMarker, keyMarker, uploadIDMarker)
	}
	if prefix != "" {
		// compare by rune count, matching how the prefix is measured
		whereExprs = append(whereExprs, "SUBSTR(object_id, 1, ?) = ?")
		args = append(args, utf8.RuneCountInString(prefix), prefix)
	}
	if bucket != "" {
		whereExprs = append(whereExprs, "b.name = ?")
		args = append(args, bucket)
	}
	whereExpr := ""
	if len(whereExprs) > 0 {
		whereExpr = "WHERE " + strings.Join(whereExprs, " AND ")
	}

	// fetch multipart uploads, ordered to make the markers stable
	var uploads []api.MultipartUpload
	rows, err := tx.Query(ctx, fmt.Sprintf("SELECT b.name, mu.key, mu.object_id, mu.upload_id, mu.created_at FROM multipart_uploads mu INNER JOIN buckets b ON b.id = mu.db_bucket_id %s ORDER BY object_id ASC, upload_id ASC %s",
		whereExpr, limitExpr), args...)
	if err != nil {
		return api.MultipartListUploadsResponse{}, fmt.Errorf("failed to fetch multipart uploads: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		upload, err := scanMultipartUpload(rows)
		if err != nil {
			return api.MultipartListUploadsResponse{}, fmt.Errorf("failed to scan multipart upload: %w", err)
		}
		uploads = append(uploads, upload)
	}

	// check if there are more uploads beyond 'limit'.
	var hasMore bool
	var nextPathMarker, nextUploadIDMarker string
	if limitUsed && len(uploads) > int(limit) {
		// drop the extra row and derive the continuation markers from the
		// last returned upload
		hasMore = true
		uploads = uploads[:len(uploads)-1]
		nextPathMarker = uploads[len(uploads)-1].Path
		nextUploadIDMarker = uploads[len(uploads)-1].UploadID
	}

	return api.MultipartListUploadsResponse{
		HasMore:            hasMore,
		NextPathMarker:     nextPathMarker,
		NextUploadIDMarker: nextUploadIDMarker,
		Uploads:            uploads,
	}, nil
}
+ var hasMore bool + var nextPathMarker, nextUploadIDMarker string + if limitUsed && len(uploads) > int(limit) { + hasMore = true + uploads = uploads[:len(uploads)-1] + nextPathMarker = uploads[len(uploads)-1].Path + nextUploadIDMarker = uploads[len(uploads)-1].UploadID + } + + return api.MultipartListUploadsResponse{ + HasMore: hasMore, + NextPathMarker: nextPathMarker, + NextUploadIDMarker: nextUploadIDMarker, + Uploads: uploads, + }, nil +} + +type multipartUpload struct { + ID int64 + Key string + Bucket string + BucketID int64 + EC []byte + MimeType string +} + +type multipartUploadPart struct { + ID int64 + PartNumber int64 + Etag string + Size int64 +} + +func MultipartUploadForCompletion(ctx context.Context, tx sql.Tx, bucket, key, uploadID string, parts []api.MultipartCompletedPart) (multipartUpload, []multipartUploadPart, int64, string, error) { + // fetch upload + var mpu multipartUpload + err := tx.QueryRow(ctx, ` + SELECT mu.id, mu.object_id, mu.mime_type, mu.key, b.name, b.id + FROM multipart_uploads mu INNER JOIN buckets b ON b.id = mu.db_bucket_id + WHERE mu.upload_id = ?`, uploadID). + Scan(&mpu.ID, &mpu.Key, &mpu.MimeType, &mpu.EC, &mpu.Bucket, &mpu.BucketID) + if err != nil { + return multipartUpload{}, nil, 0, "", fmt.Errorf("failed to fetch upload: %w", err) + } else if mpu.Key != key { + return multipartUpload{}, nil, 0, "", fmt.Errorf("object id mismatch: %v != %v: %w", mpu.Key, key, api.ErrObjectNotFound) + } else if mpu.Bucket != bucket { + return multipartUpload{}, nil, 0, "", fmt.Errorf("bucket name mismatch: %v != %v: %w", mpu.Bucket, bucket, api.ErrBucketNotFound) + } + + // find relevant parts + rows, err := tx.Query(ctx, "SELECT id, part_number, etag, size FROM multipart_parts WHERE db_multipart_upload_id = ? 
// ObjectsStats returns aggregate statistics over the stored objects: object
// count, minimum health, total object size, unfinished multipart upload
// count/size, total sector size and total uploaded (contract) size. When
// opts.Bucket is set, object and multipart stats are restricted to that
// bucket; api.ErrBucketNotFound is returned if it does not exist.
func ObjectsStats(ctx context.Context, tx sql.Tx, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) {
	// resolve the optional bucket filter once and reuse it for the
	// objects and multipart queries below
	var args []any
	var bucketExpr string
	var bucketID int64
	if opts.Bucket != "" {
		err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE name = ?", opts.Bucket).
			Scan(&bucketID)
		if errors.Is(err, dsql.ErrNoRows) {
			return api.ObjectsStatsResponse{}, api.ErrBucketNotFound
		} else if err != nil {
			return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch bucket id: %w", err)
		}
		bucketExpr = "WHERE db_bucket_id = ?"
		args = append(args, bucketID)
	}

	// objects stats
	var numObjects, totalObjectsSize uint64
	var minHealth float64
	err := tx.QueryRow(ctx, "SELECT COUNT(*), COALESCE(MIN(health), 1), COALESCE(SUM(size), 0) FROM objects "+bucketExpr, args...).
		Scan(&numObjects, &minHealth, &totalObjectsSize)
	if err != nil {
		return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch objects stats: %w", err)
	}

	// multipart upload stats
	var unfinishedObjects uint64
	err = tx.QueryRow(ctx, "SELECT COUNT(*) FROM multipart_uploads "+bucketExpr, args...).
		Scan(&unfinishedObjects)
	if err != nil {
		return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch multipart upload stats: %w", err)
	}

	// multipart upload part stats
	// (db_bucket_id is resolved via the joined multipart_uploads table)
	var totalUnfinishedObjectsSize uint64
	err = tx.QueryRow(ctx, "SELECT COALESCE(SUM(size), 0) FROM multipart_parts mp INNER JOIN multipart_uploads mu ON mp.db_multipart_upload_id = mu.id "+bucketExpr, args...).
		Scan(&totalUnfinishedObjectsSize)
	if err != nil {
		return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch multipart upload part stats: %w", err)
	}

	// total sectors; buffered slabs are excluded, and with a bucket filter
	// only slabs referenced by objects in that bucket are counted
	var whereExpr string
	var whereArgs []any
	if opts.Bucket != "" {
		whereExpr = `
			AND EXISTS (
				SELECT 1 FROM slices sli
				INNER JOIN objects o ON o.id = sli.db_object_id AND o.db_bucket_id = ?
				WHERE sli.db_slab_id = sla.id
			)
		`
		whereArgs = append(whereArgs, bucketID)
	}
	var totalSectors uint64
	err = tx.QueryRow(ctx, "SELECT COALESCE(SUM(total_shards), 0) FROM slabs sla WHERE db_buffered_slab_id IS NULL "+whereExpr, whereArgs...).
		Scan(&totalSectors)
	if err != nil {
		return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch total sector stats: %w", err)
	}

	// total uploaded data across all contracts; intentionally not filtered
	// by bucket since contracts are not bucket-scoped
	var totalUploaded uint64
	err = tx.QueryRow(ctx, "SELECT COALESCE(SUM(size), 0) FROM contracts").
		Scan(&totalUploaded)
	if err != nil {
		return api.ObjectsStatsResponse{}, fmt.Errorf("failed to fetch contract stats: %w", err)
	}

	return api.ObjectsStatsResponse{
		MinHealth:                  minHealth,
		NumObjects:                 numObjects,
		NumUnfinishedObjects:       unfinishedObjects,
		TotalUnfinishedObjectsSize: totalUnfinishedObjectsSize,
		TotalObjectsSize:           totalObjectsSize,
		TotalSectorsSize:           totalSectors * rhpv2.SectorSize,
		TotalUploadedSize:          totalUploaded,
	}, nil
}
// RecordHostScans persists the outcome of a batch of host scans, updating
// each host's scan bookkeeping (uptime/downtime, failure streaks, settings,
// price table, subnets and interaction counters) in a single prepared UPDATE
// that is executed once per scan. Timestamps are stored as unix nanoseconds.
func RecordHostScans(ctx context.Context, tx sql.Tx, scans []api.HostScan) error {
	if len(scans) == 0 {
		return nil
	}
	// NOTE: The order of the assignments in the UPDATE statement is important
	// for MySQL compatibility. e.g. second_to_last_scan_success must be set
	// before last_scan_success.
	stmt, err := tx.Prepare(ctx, `
		UPDATE hosts SET
		scanned = scanned OR ?,
		total_scans = total_scans + 1,
		second_to_last_scan_success = last_scan_success,
		last_scan_success = ?,
		recent_downtime = CASE WHEN ? AND last_scan > 0 AND last_scan < ? THEN recent_downtime + ? - last_scan ELSE CASE WHEN ? THEN 0 ELSE recent_downtime END END,
		recent_scan_failures = CASE WHEN ? THEN 0 ELSE recent_scan_failures + 1 END,
		downtime = CASE WHEN ? AND last_scan > 0 AND last_scan < ? THEN downtime + ? - last_scan ELSE downtime END,
		uptime = CASE WHEN ? AND last_scan > 0 AND last_scan < ? THEN uptime + ? - last_scan ELSE uptime END,
		last_scan = ?,
		settings = CASE WHEN ? THEN ? ELSE settings END,
		price_table = CASE WHEN ? AND (price_table_expiry IS NULL OR ? > price_table_expiry) THEN ? ELSE price_table END,
		price_table_expiry = CASE WHEN ? AND (price_table_expiry IS NULL OR ? > price_table_expiry) THEN ? ELSE price_table_expiry END,
		successful_interactions = CASE WHEN ? THEN successful_interactions + 1 ELSE successful_interactions END,
		failed_interactions = CASE WHEN ? THEN failed_interactions + 1 ELSE failed_interactions END,
		subnets = CASE WHEN ? THEN ? ELSE subnets END
		WHERE public_key = ?
	`)
	if err != nil {
		return fmt.Errorf("failed to prepare statement to update host with scan: %w", err)
	}
	defer stmt.Close()

	now := time.Now()
	for _, scan := range scans {
		scanTime := scan.Timestamp.UnixNano()
		// the argument order below must match the '?' placeholders in the
		// UPDATE statement exactly - the trailing comments name the column
		// each group of arguments feeds
		_, err = stmt.Exec(ctx,
			scan.Success, // scanned
			scan.Success, // last_scan_success
			!scan.Success, scanTime, scanTime, scan.Success, // recent_downtime
			scan.Success,                      // recent_scan_failures
			!scan.Success, scanTime, scanTime, // downtime
			scan.Success, scanTime, scanTime, // uptime
			scanTime,                                  // last_scan
			scan.Success, HostSettings(scan.Settings), // settings
			scan.Success, now, PriceTable(scan.PriceTable), // price_table
			scan.Success, now, now, // price_table_expiry
			scan.Success,  // successful_interactions
			!scan.Success, // failed_interactions
			len(scan.Subnets) > 0, strings.Join(scan.Subnets, ","),
			PublicKey(scan.HostKey),
		)
		if err != nil {
			return fmt.Errorf("failed to update host with scan: %w", err)
		}
	}
	return nil
}
+ `) + if err != nil { + return fmt.Errorf("failed to prepare statement to update host with scan: %w", err) + } + defer stmt.Close() + + now := time.Now() + for _, scan := range scans { + scanTime := scan.Timestamp.UnixNano() + _, err = stmt.Exec(ctx, + scan.Success, // scanned + scan.Success, // last_scan_success + !scan.Success, scanTime, scanTime, scan.Success, // recent_downtime + scan.Success, // recent_scan_failures + !scan.Success, scanTime, scanTime, // downtime + scan.Success, scanTime, scanTime, // uptime + scanTime, // last_scan + scan.Success, HostSettings(scan.Settings), // settings + scan.Success, now, PriceTable(scan.PriceTable), // price_table + scan.Success, now, now, // price_table_expiry + scan.Success, // successful_interactions + !scan.Success, // failed_interactions + len(scan.Subnets) > 0, strings.Join(scan.Subnets, ","), + PublicKey(scan.HostKey), + ) + if err != nil { + return fmt.Errorf("failed to update host with scan: %w", err) + } + } + return nil +} + +func RecordPriceTables(ctx context.Context, tx sql.Tx, priceTableUpdates []api.HostPriceTableUpdate) error { + if len(priceTableUpdates) == 0 { + return nil + } + + stmt, err := tx.Prepare(ctx, ` + UPDATE hosts SET + recent_downtime = CASE WHEN ? THEN recent_downtime = 0 ELSE recent_downtime END, + recent_scan_failures = CASE WHEN ? THEN recent_scan_failures = 0 ELSE recent_scan_failures END, + price_table = CASE WHEN ? THEN ? ELSE price_table END, + price_table_expiry = CASE WHEN ? THEN ? ELSE price_table_expiry END, + successful_interactions = CASE WHEN ? THEN successful_interactions + 1 ELSE successful_interactions END, + failed_interactions = CASE WHEN ? THEN failed_interactions + 1 ELSE failed_interactions END + WHERE public_key = ? 
+ `) + if err != nil { + return fmt.Errorf("failed to prepare statement to update host with price table: %w", err) + } + defer stmt.Close() + + for _, ptu := range priceTableUpdates { + _, err := stmt.Exec(ctx, + ptu.Success, // recent_downtime + ptu.Success, // recent_scan_failures + ptu.Success, PriceTable(ptu.PriceTable.HostPriceTable), // price_table + ptu.Success, ptu.PriceTable.Expiry, // price_table_expiry + ptu.Success, // successful_interactions + !ptu.Success, // failed_interactions + PublicKey(ptu.HostKey), + ) + if err != nil { + return fmt.Errorf("failed to update host with price table: %w", err) + } + } + return nil +} + +func RemoveContractSet(ctx context.Context, tx sql.Tx, contractSet string) error { + _, err := tx.Exec(ctx, "DELETE FROM contract_sets WHERE name = ?", contractSet) + if err != nil { + return fmt.Errorf("failed to delete contract set: %w", err) + } + return nil +} + +func RemoveOfflineHosts(ctx context.Context, tx sql.Tx, minRecentFailures uint64, maxDownTime time.Duration) (int64, error) { + // fetch contracts + rows, err := tx.Query(ctx, ` + SELECT fcid + FROM contracts + INNER JOIN hosts h ON h.id = contracts.host_id + WHERE recent_downtime >= ? AND recent_scan_failures >= ? + `, maxDownTime, minRecentFailures) + if err != nil { + return 0, fmt.Errorf("failed to fetch contracts: %w", err) + } + defer rows.Close() + + var fcids []types.FileContractID + for rows.Next() { + var fcid FileContractID + if err := rows.Scan(&fcid); err != nil { + return 0, fmt.Errorf("failed to scan contract: %w", err) + } + fcids = append(fcids, types.FileContractID(fcid)) + } + + // archive contracts + for _, fcid := range fcids { + if err := ArchiveContract(ctx, tx, fcid, api.ContractArchivalReasonHostPruned); err != nil { + return 0, fmt.Errorf("failed to archive contract %v: %w", fcid, err) + } + } + + // delete hosts + res, err := tx.Exec(ctx, "DELETE FROM hosts WHERE recent_downtime >= ? 
AND recent_scan_failures >= ?", + maxDownTime, minRecentFailures) + if err != nil { + return 0, fmt.Errorf("failed to delete hosts: %w", err) + } + return res.RowsAffected() +} + +func InitConsensusInfo(ctx context.Context, tx sql.Tx) (types.ChainIndex, modules.ConsensusChangeID, error) { + // try fetch existing + var ccid modules.ConsensusChangeID + var ci types.ChainIndex + err := tx.QueryRow(ctx, "SELECT cc_id, height, block_id FROM consensus_infos WHERE id = ?", consensuInfoID). + Scan((*CCID)(&ccid), &ci.Height, (*Hash256)(&ci.ID)) + if err != nil && !errors.Is(err, dsql.ErrNoRows) { + return types.ChainIndex{}, modules.ConsensusChangeID{}, fmt.Errorf("failed to fetch consensus info: %w", err) + } else if err == nil { + return ci, ccid, nil + } + // otherwise init + ci = types.ChainIndex{} + if _, err := tx.Exec(ctx, "INSERT INTO consensus_infos (id, created_at, cc_id, height, block_id) VALUES (?, ?, ?, ?, ?)", + consensuInfoID, time.Now(), (CCID)(modules.ConsensusChangeBeginning), ci.Height, (Hash256)(ci.ID)); err != nil { + return types.ChainIndex{}, modules.ConsensusChangeID{}, fmt.Errorf("failed to init consensus infos: %w", err) + } + return types.ChainIndex{}, modules.ConsensusChangeBeginning, nil +} + +func QueryContracts(ctx context.Context, tx sql.Tx, whereExprs []string, whereArgs []any) ([]api.ContractMetadata, error) { + var whereExpr string + if len(whereExprs) > 0 { + whereExpr = "WHERE " + strings.Join(whereExprs, " AND ") + } + rows, err := tx.Query(ctx, fmt.Sprintf(` + SELECT c.fcid, c.renewed_from, c.contract_price, c.state, c.total_cost, c.proof_height, + c.revision_height, c.revision_number, c.size, c.start_height, c.window_start, c.window_end, + c.upload_spending, c.download_spending, c.fund_account_spending, c.delete_spending, c.list_spending, + COALESCE(cs.name, ""), h.net_address, h.public_key, h.settings->>'$.siamuxport' AS siamux_port + FROM contracts AS c + INNER JOIN hosts h ON h.id = c.host_id + LEFT JOIN contract_set_contracts csc 
ON csc.db_contract_id = c.id + LEFT JOIN contract_sets cs ON cs.id = csc.db_contract_set_id + %s + ORDER BY c.id ASC`, whereExpr), + whereArgs..., + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var scannedRows []ContractRow + for rows.Next() { + var r ContractRow + if err := r.Scan(rows); err != nil { + return nil, fmt.Errorf("failed to scan row: %w", err) + } + scannedRows = append(scannedRows, r) + } + + if len(scannedRows) == 0 { + return nil, nil + } + + // merge 'Host', 'Name' and 'Contract' into dbContracts + var contracts []api.ContractMetadata + current, scannedRows := scannedRows[0].ContractMetadata(), scannedRows[1:] + for { + if len(scannedRows) == 0 { + contracts = append(contracts, current) + break + } else if current.ID != types.FileContractID(scannedRows[0].FCID) { + contracts = append(contracts, current) + } else if scannedRows[0].ContractSet != "" { + current.ContractSets = append(current.ContractSets, scannedRows[0].ContractSet) + } + current, scannedRows = scannedRows[0].ContractMetadata(), scannedRows[1:] + } + return contracts, nil +} + +func RenewedContract(ctx context.Context, tx sql.Tx, renewedFrom types.FileContractID) (api.ContractMetadata, error) { + contracts, err := QueryContracts(ctx, tx, []string{"c.renewed_from = ?"}, []any{FileContractID(renewedFrom)}) + if err != nil { + return api.ContractMetadata{}, fmt.Errorf("failed to query renewed contract: %w", err) + } else if len(contracts) == 0 { + return api.ContractMetadata{}, api.ErrContractNotFound + } + return contracts[0], nil +} + +func ResetConsensusSubscription(ctx context.Context, tx sql.Tx) (ci types.ChainIndex, err error) { + if _, err := tx.Exec(ctx, "DELETE FROM consensus_infos"); err != nil { + return types.ChainIndex{}, fmt.Errorf("failed to delete consensus infos: %w", err) + } else if _, err := tx.Exec(ctx, "DELETE FROM siacoin_elements"); err != nil { + return types.ChainIndex{}, fmt.Errorf("failed to delete siacoin elements: %w", err) + } else if 
// ResetLostSectors resets the lost sector count of the host with the given
// public key to zero.
func ResetLostSectors(ctx context.Context, tx sql.Tx, hk types.PublicKey) error {
	_, err := tx.Exec(ctx, "UPDATE hosts SET lost_sectors = 0 WHERE public_key = ?", PublicKey(hk))
	if err != nil {
		return fmt.Errorf("failed to reset lost sectors for host %v: %w", hk, err)
	}
	return nil
}

// SearchHosts returns a page of hosts filtered by allowlist/blocklist mode,
// usability (as judged by the given autopilot's host checks), address
// substring and public key. A limit of -1 disables the limit; offset must be
// non-negative. Each returned host is annotated with its stored data and its
// per-autopilot checks.
func SearchHosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) {
	if offset < 0 {
		return nil, ErrNegativeOffset
	}

	// determine whether an allowlist/blocklist is configured at all since
	// that changes the meaning of 'allowed' and 'blocked'
	var hasAllowlist, hasBlocklist bool
	if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_allowlist_entries)").Scan(&hasAllowlist); err != nil {
		return nil, fmt.Errorf("failed to check for allowlist: %w", err)
	} else if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM host_blocklist_entries)").Scan(&hasBlocklist); err != nil {
		return nil, fmt.Errorf("failed to check for blocklist: %w", err)
	}

	// validate filterMode
	switch filterMode {
	case api.HostFilterModeAllowed:
	case api.HostFilterModeBlocked:
	case api.HostFilterModeAll:
	default:
		return nil, fmt.Errorf("invalid filter mode: %v", filterMode)
	}

	var whereExprs []string
	var args []any

	// fetch autopilot id
	var autopilotID int64
	if autopilot != "" {
		if err := tx.QueryRow(ctx, "SELECT id FROM autopilots WHERE identifier = ?", autopilot).
			Scan(&autopilotID); errors.Is(err, dsql.ErrNoRows) {
			return nil, api.ErrAutopilotNotFound
		} else if err != nil {
			return nil, fmt.Errorf("failed to fetch autopilot id: %w", err)
		}
	}

	// filter allowlist/blocklist
	switch filterMode {
	case api.HostFilterModeAllowed:
		if hasAllowlist {
			whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
		}
		if hasBlocklist {
			whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
		}
	case api.HostFilterModeBlocked:
		if hasAllowlist {
			whereExprs = append(whereExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
		}
		if hasBlocklist {
			whereExprs = append(whereExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
		}
		if !hasAllowlist && !hasBlocklist {
			// if neither an allowlist nor a blocklist exist, all hosts are
			// allowed which means we return none
			return []api.Host{}, nil
		}
	}

	// filter address
	if addressContains != "" {
		whereExprs = append(whereExprs, "h.net_address LIKE ?")
		args = append(args, "%"+addressContains+"%")
	}

	// filter public key
	if len(keyIn) > 0 {
		pubKeys := make([]any, len(keyIn))
		for i, pk := range keyIn {
			pubKeys[i] = PublicKey(pk)
		}
		placeholders := strings.Repeat("?, ", len(keyIn)-1) + "?"
		whereExprs = append(whereExprs, fmt.Sprintf("h.public_key IN (%s)", placeholders))
		args = append(args, pubKeys...)
	}

	// filter usability
	whereApExpr := ""
	if autopilot != "" {
		whereApExpr = "AND hc.db_autopilot_id = ?"
	}
	// NOTE(review): when autopilot == "" the EXISTS expression contains no
	// autopilot placeholder but autopilotID is still appended to args,
	// leaving an unused bind argument - verify callers always pass an
	// autopilot together with a usability mode.
	switch usabilityMode {
	case api.UsabilityFilterModeUsable:
		whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 0 AND hc.usability_offline = 0 AND hc.usability_low_score = 0 AND hc.usability_redundant_ip = 0 AND hc.usability_gouging = 0 AND hc.usability_not_accepting_contracts = 0 AND hc.usability_not_announced = 0 AND hc.usability_not_completing_scan = 0) %s)", whereApExpr))
		args = append(args, autopilotID)
	case api.UsabilityFilterModeUnusable:
		whereExprs = append(whereExprs, fmt.Sprintf("EXISTS (SELECT 1 FROM hosts h2 INNER JOIN host_checks hc ON hc.db_host_id = h2.id AND h2.id = h.id WHERE (hc.usability_blocked = 1 OR hc.usability_offline = 1 OR hc.usability_low_score = 1 OR hc.usability_redundant_ip = 1 OR hc.usability_gouging = 1 OR hc.usability_not_accepting_contracts = 1 OR hc.usability_not_announced = 1 OR hc.usability_not_completing_scan = 1) %s)", whereApExpr))
		args = append(args, autopilotID)
	}

	// offset + limit
	if limit == -1 {
		limit = math.MaxInt64
	}
	offsetLimitStr := fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset)

	// fetch stored data for each host
	rows, err := tx.Query(ctx, "SELECT host_id, SUM(size) FROM contracts GROUP BY host_id")
	if err != nil {
		return nil, fmt.Errorf("failed to fetch stored data: %w", err)
	}
	defer rows.Close()

	storedDataMap := make(map[int64]uint64)
	for rows.Next() {
		var hostID int64
		var storedData uint64
		if err := rows.Scan(&hostID, &storedData); err != nil {
			return nil, fmt.Errorf("failed to scan stored data: %w", err)
		}
		storedDataMap[hostID] = storedData
	}

	// query hosts; the blocked expression is selected as an extra column so
	// each host carries its blocked status
	var blockedExprs []string
	if hasAllowlist {
		blockedExprs = append(blockedExprs, "NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
	}
	if hasBlocklist {
		blockedExprs = append(blockedExprs, "EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = h.id)")
	}
	var blockedExpr string
	if len(blockedExprs) > 0 {
		blockedExpr = strings.Join(blockedExprs, " OR ")
	} else {
		blockedExpr = "FALSE"
	}
	var whereExpr string
	if len(whereExprs) > 0 {
		whereExpr = "WHERE " + strings.Join(whereExprs, " AND ")
	}
	rows, err = tx.Query(ctx, fmt.Sprintf(`
		SELECT h.id, h.created_at, h.last_announcement, h.public_key, h.net_address, h.price_table, h.price_table_expiry,
		h.settings, h.total_scans, h.last_scan, h.last_scan_success, h.second_to_last_scan_success,
		h.uptime, h.downtime, h.successful_interactions, h.failed_interactions, COALESCE(h.lost_sectors, 0),
		h.scanned, h.subnets, %s
		FROM hosts h
		%s
		%s
	`, blockedExpr, whereExpr, offsetLimitStr), args...)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch hosts: %w", err)
	}
	defer rows.Close()

	var hosts []api.Host
	for rows.Next() {
		var h api.Host
		var hostID int64
		var pte dsql.NullTime
		var subnets string
		err := rows.Scan(&hostID, &h.KnownSince, &h.LastAnnouncement, (*PublicKey)(&h.PublicKey),
			&h.NetAddress, (*PriceTable)(&h.PriceTable.HostPriceTable), &pte,
			(*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeNS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess,
			&h.Interactions.SecondToLastScanSuccess, &h.Interactions.Uptime, &h.Interactions.Downtime,
			&h.Interactions.SuccessfulInteractions, &h.Interactions.FailedInteractions, &h.Interactions.LostSectors,
			&h.Scanned, &subnets, &h.Blocked,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan host: %w", err)
		}

		// subnets are stored as a comma-separated string
		if subnets != "" {
			h.Subnets = strings.Split(subnets, ",")
		}
		h.PriceTable.Expiry = pte.Time
		h.StoredData = storedDataMap[hostID]
		hosts = append(hosts, h)
	}

	// query host checks for the same page of hosts (same whereExpr, same
	// offset/limit), optionally restricted to the given autopilot
	var apExpr string
	if autopilot != "" {
		apExpr = "WHERE ap.identifier = ?"
		args = append(args, autopilot)
	}
	rows, err = tx.Query(ctx, fmt.Sprintf(`
		SELECT h.public_key, ap.identifier, hc.usability_blocked, hc.usability_offline, hc.usability_low_score, hc.usability_redundant_ip,
		hc.usability_gouging, usability_not_accepting_contracts, hc.usability_not_announced, hc.usability_not_completing_scan,
		hc.score_age, hc.score_collateral, hc.score_interactions, hc.score_storage_remaining, hc.score_uptime,
		hc.score_version, hc.score_prices, hc.gouging_contract_err, hc.gouging_download_err, hc.gouging_gouging_err,
		hc.gouging_prune_err, hc.gouging_upload_err
		FROM (
			SELECT h.id, h.public_key
			FROM hosts h
			%s
			%s
		) AS h
		INNER JOIN host_checks hc ON hc.db_host_id = h.id
		INNER JOIN autopilots ap ON hc.db_autopilot_id = ap.id
		%s
	`, whereExpr, offsetLimitStr, apExpr), args...)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch host checks: %w", err)
	}
	defer rows.Close()

	// collect the checks keyed by host public key and autopilot identifier
	hostChecks := make(map[types.PublicKey]map[string]api.HostCheck)
	for rows.Next() {
		var ap string
		var pk PublicKey
		var hc api.HostCheck
		err := rows.Scan(&pk, &ap, &hc.Usability.Blocked, &hc.Usability.Offline, &hc.Usability.LowScore, &hc.Usability.RedundantIP,
			&hc.Usability.Gouging, &hc.Usability.NotAcceptingContracts, &hc.Usability.NotAnnounced, &hc.Usability.NotCompletingScan,
			&hc.Score.Age, &hc.Score.Collateral, &hc.Score.Interactions, &hc.Score.StorageRemaining, &hc.Score.Uptime,
			&hc.Score.Version, &hc.Score.Prices, &hc.Gouging.ContractErr, &hc.Gouging.DownloadErr, &hc.Gouging.GougingErr,
			&hc.Gouging.PruneErr, &hc.Gouging.UploadErr)
		if err != nil {
			return nil, fmt.Errorf("failed to scan host: %w", err)
		}
		if _, ok := hostChecks[types.PublicKey(pk)]; !ok {
			hostChecks[types.PublicKey(pk)] = make(map[string]api.HostCheck)
		}
		hostChecks[types.PublicKey(pk)][ap] = hc
	}

	// fill in hosts
	for i := range hosts {
		hosts[i].Checks = hostChecks[hosts[i].PublicKey]
	}
	return hosts, nil
}

// Setting returns the value of the setting with the given key, or
// api.ErrSettingNotFound if it does not exist.
func Setting(ctx context.Context, tx sql.Tx, key string) (string, error) {
	var value string
	err := tx.QueryRow(ctx, "SELECT value FROM settings WHERE `key` = ?", key).Scan((*BusSetting)(&value))
	if errors.Is(err, dsql.ErrNoRows) {
		return "", api.ErrSettingNotFound
	} else if err != nil {
		return "", fmt.Errorf("failed to fetch setting '%s': %w", key, err)
	}
	return value, nil
}
context.Context, tx sql.Tx, key string) (string, error) { + var value string + err := tx.QueryRow(ctx, "SELECT value FROM settings WHERE `key` = ?", key).Scan((*BusSetting)(&value)) + if errors.Is(err, dsql.ErrNoRows) { + return "", api.ErrSettingNotFound + } else if err != nil { + return "", fmt.Errorf("failed to fetch setting '%s': %w", key, err) + } + return value, nil +} + +func Settings(ctx context.Context, tx sql.Tx) ([]string, error) { + rows, err := tx.Query(ctx, "SELECT `key` FROM settings") + if err != nil { + return nil, fmt.Errorf("failed to query settings: %w", err) + } + var settings []string + for rows.Next() { + var setting string + if err := rows.Scan(&setting); err != nil { + return nil, fmt.Errorf("failed to scan setting key") + } + settings = append(settings, setting) + } + return settings, nil +} + +func SetUncleanShutdown(ctx context.Context, tx sql.Tx) error { + _, err := tx.Exec(ctx, "UPDATE ephemeral_accounts SET clean_shutdown = 0, requires_sync = 1") + if err != nil { + return fmt.Errorf("failed to set unclean shutdown: %w", err) + } + return err +} + +func SlabBuffers(ctx context.Context, tx sql.Tx) (map[string]string, error) { + rows, err := tx.Query(ctx, ` + SELECT buffered_slabs.filename, cs.name + FROM buffered_slabs + INNER JOIN slabs sla ON sla.db_buffered_slab_id = buffered_slabs.id + INNER JOIN contract_sets cs ON cs.id = sla.db_contract_set_id + `) + if err != nil { + return nil, fmt.Errorf("failed to fetch contract sets") + } + defer rows.Close() + + fileNameToContractSet := make(map[string]string) + for rows.Next() { + var fileName string + var contractSetName string + if err := rows.Scan(&fileName, &contractSetName); err != nil { + return nil, fmt.Errorf("failed to scan contract set: %w", err) + } + fileNameToContractSet[fileName] = contractSetName + } + return fileNameToContractSet, nil +} + +func UpdateBucketPolicy(ctx context.Context, tx sql.Tx, bucket string, bp api.BucketPolicy) error { + policy, err := json.Marshal(bp) 
+ if err != nil { + return err + } + res, err := tx.Exec(ctx, "UPDATE buckets SET policy = ? WHERE name = ?", policy, bucket) + if err != nil { + return fmt.Errorf("failed to update bucket policy: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to check rows affected: %w", err) + } else if n == 0 { + return api.ErrBucketNotFound + } + return nil +} + +func Webhooks(ctx context.Context, tx sql.Tx) ([]webhooks.Webhook, error) { + rows, err := tx.Query(ctx, "SELECT module, event, url, headers FROM webhooks") + if err != nil { + return nil, fmt.Errorf("failed to fetch webhooks: %w", err) + } + defer rows.Close() + + var whs []webhooks.Webhook + for rows.Next() { + var webhook webhooks.Webhook + var headers string + if err := rows.Scan(&webhook.Module, &webhook.Event, &webhook.URL, &headers); err != nil { + return nil, fmt.Errorf("failed to scan webhook: %w", err) + } else if err := json.Unmarshal([]byte(headers), &webhook.Headers); err != nil { + return nil, fmt.Errorf("failed to unmarshal headers: %w", err) + } + whs = append(whs, webhook) + } + return whs, nil +} + +func scanAutopilot(s scanner) (api.Autopilot, error) { + var a api.Autopilot + if err := s.Scan(&a.ID, (*AutopilotConfig)(&a.Config), &a.CurrentPeriod); err != nil { + return api.Autopilot{}, err + } + return a, nil +} + +func scanBucket(s scanner) (api.Bucket, error) { + var createdAt time.Time + var name, policy string + err := s.Scan(&createdAt, &name, &policy) + if errors.Is(err, dsql.ErrNoRows) { + return api.Bucket{}, api.ErrBucketNotFound + } else if err != nil { + return api.Bucket{}, err + } + var bp api.BucketPolicy + if err := json.Unmarshal([]byte(policy), &bp); err != nil { + return api.Bucket{}, err + } + return api.Bucket{ + CreatedAt: api.TimeRFC3339(createdAt), + Name: name, + Policy: bp, + }, nil +} + +func scanMultipartUpload(s scanner) (resp api.MultipartUpload, _ error) { + var key SecretKey + err := s.Scan(&resp.Bucket, &key, &resp.Path, 
&resp.UploadID, &resp.CreatedAt) + if errors.Is(err, dsql.ErrNoRows) { + return api.MultipartUpload{}, api.ErrMultipartUploadNotFound + } else if err != nil { + return api.MultipartUpload{}, fmt.Errorf("failed to fetch multipart upload: %w", err) + } else if err := resp.Key.UnmarshalBinary(key); err != nil { + return api.MultipartUpload{}, fmt.Errorf("failed to unmarshal encryption key: %w", err) + } + return +} + +func scanObjectMetadata(s scanner) (api.ObjectMetadata, error) { + var md api.ObjectMetadata + if err := s.Scan(&md.Name, &md.Size, &md.Health, &md.MimeType, &md.ModTime, &md.ETag); err != nil { + return api.ObjectMetadata{}, fmt.Errorf("failed to scan object metadata: %w", err) + } + return md, nil +} + +func ListObjects(ctx context.Context, tx sql.Tx, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + // fetch one more to see if there are more entries + if limit <= -1 { + limit = math.MaxInt + } else { + limit++ + } + + // establish sane defaults for sorting + if sortBy == "" { + sortBy = api.ObjectSortByName + } + if sortDir == "" { + sortDir = api.ObjectSortDirAsc + } + + // filter by bucket + whereExprs := []string{"o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)"} + whereArgs := []any{bucket} + + // apply prefix + if prefix != "" { + whereExprs = append(whereExprs, "o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) 
= ?") + whereArgs = append(whereArgs, prefix+"%", utf8.RuneCountInString(prefix), prefix) + } + + // apply sorting + dir2SQL := map[string]string{ + api.ObjectSortDirAsc: "ASC", + api.ObjectSortDirDesc: "DESC", + } + if _, ok := dir2SQL[strings.ToLower(sortDir)]; !ok { + return api.ObjectsListResponse{}, fmt.Errorf("invalid sortDir: %v", sortDir) + } + var orderByExprs []string + switch strings.ToLower(sortBy) { + case "", api.ObjectSortByName: + orderByExprs = append(orderByExprs, "o.object_id "+dir2SQL[strings.ToLower(sortDir)]) + case api.ObjectSortByHealth: + orderByExprs = append(orderByExprs, "o.health "+dir2SQL[strings.ToLower(sortDir)]) + case api.ObjectSortBySize: + orderByExprs = append(orderByExprs, "o.size "+dir2SQL[strings.ToLower(sortDir)]) + default: + return api.ObjectsListResponse{}, fmt.Errorf("invalid sortBy: %v", sortBy) + } + + // always sort by object_id as well if we aren't explicitly + if sortBy != api.ObjectSortByName { + orderByExprs = append(orderByExprs, "o.object_id ASC") + } + + // apply marker + queryMarker := func(dst any, marker, col string) error { + err := tx.QueryRow(ctx, fmt.Sprintf(` + SELECT o.%s + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + WHERE b.name = ? AND o.object_id = ? + `, col), bucket, marker).Scan(dst) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrMarkerNotFound + } else { + return err + } + } + desc := strings.ToLower(sortDir) == api.ObjectSortDirDesc + if marker != "" { + switch strings.ToLower(sortBy) { + case api.ObjectSortByName: + if desc { + whereExprs = append(whereExprs, "o.object_id < ?") + } else { + whereExprs = append(whereExprs, "o.object_id > ?") + } + whereArgs = append(whereArgs, marker) + case api.ObjectSortByHealth: + var markerHealth float64 + if err := queryMarker(&markerHealth, marker, "health"); err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch health marker: %w", err) + } else if desc { + whereExprs = append(whereExprs, "((o.health <= ? 
AND o.object_id >?) OR o.health < ?)")
+				whereArgs = append(whereArgs, markerHealth, marker, markerHealth)
+			} else {
+				whereExprs = append(whereExprs, "(o.health > ? OR (o.health >= ? AND object_id > ?))")
+				whereArgs = append(whereArgs, markerHealth, markerHealth, marker)
+			}
+		case api.ObjectSortBySize:
+			var markerSize int64
+			if err := queryMarker(&markerSize, marker, "size"); err != nil {
+				return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch size marker: %w", err)
+			} else if desc {
+				whereExprs = append(whereExprs, "((o.size <= ? AND o.object_id >?) OR o.size < ?)")
+				whereArgs = append(whereArgs, markerSize, marker, markerSize)
+			} else {
+				whereExprs = append(whereExprs, "(o.size > ? OR (o.size >= ? AND object_id > ?))")
+				whereArgs = append(whereArgs, markerSize, markerSize, marker)
+			}
+		default:
+			return api.ObjectsListResponse{}, fmt.Errorf("invalid marker: %v", marker)
+		}
+	}
+
+	// apply limit
+	whereArgs = append(whereArgs, limit)
+
+	// run query
+	rows, err := tx.Query(ctx, fmt.Sprintf(`
+		SELECT o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag
+		FROM objects o
+		WHERE %s
+		ORDER BY %s
+		LIMIT ?
+	`,
+		strings.Join(whereExprs, " AND "),
+		strings.Join(orderByExprs, ", ")),
+		whereArgs...)
+ if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to fetch objects: %w", err) + } + defer rows.Close() + + var objects []api.ObjectMetadata + for rows.Next() { + om, err := scanObjectMetadata(rows) + if err != nil { + return api.ObjectsListResponse{}, fmt.Errorf("failed to scan object metadata: %w", err) + } + objects = append(objects, om) + } + + var hasMore bool + var nextMarker string + if len(objects) == limit { + objects = objects[:len(objects)-1] + if len(objects) > 0 { + hasMore = true + nextMarker = objects[len(objects)-1].Name + } + } + + return api.ObjectsListResponse{ + HasMore: hasMore, + NextMarker: nextMarker, + Objects: objects, + }, nil +} diff --git a/stores/sql/metrics.go b/stores/sql/metrics.go new file mode 100644 index 000000000..689f98843 --- /dev/null +++ b/stores/sql/metrics.go @@ -0,0 +1,621 @@ +package sql + +import ( + "context" + "errors" + "fmt" + "math" + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/sql" +) + +const ( + contractMetricGranularity = 5 * time.Minute +) + +type ( + ContractMetricsQueryOpts struct { + api.ContractMetricsQueryOpts + IndexHint string + } +) + +func ContractMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts ContractMetricsQueryOpts) ([]api.ContractMetric, error) { + // define a helper function to scan a contract metric from a row. 
+ scanContractMetric := func(rows *sql.LoggedRows, aggregate bool) (cm api.ContractMetric, err error) { + var placeHolder int64 + var placeHolderTime time.Time + var placeHolderHK PublicKey + var placeHolderFCID FileContractID + var placeHolderRevisionNumber Unsigned64 + + var timestamp UnixTimeMS + err = rows.Scan( + &placeHolder, + &placeHolderTime, + ×tamp, + &placeHolderFCID, + &placeHolderHK, + (*Unsigned64)(&cm.RemainingCollateral.Lo), (*Unsigned64)(&cm.RemainingCollateral.Hi), + (*Unsigned64)(&cm.RemainingFunds.Lo), (*Unsigned64)(&cm.RemainingFunds.Hi), + &placeHolderRevisionNumber, + (*Unsigned64)(&cm.UploadSpending.Lo), (*Unsigned64)(&cm.UploadSpending.Hi), + (*Unsigned64)(&cm.DownloadSpending.Lo), (*Unsigned64)(&cm.DownloadSpending.Hi), + (*Unsigned64)(&cm.FundAccountSpending.Lo), (*Unsigned64)(&cm.FundAccountSpending.Hi), + (*Unsigned64)(&cm.DeleteSpending.Lo), (*Unsigned64)(&cm.DeleteSpending.Hi), + (*Unsigned64)(&cm.ListSpending.Lo), (*Unsigned64)(&cm.ListSpending.Hi), + ) + if err != nil { + err = fmt.Errorf("failed to scan contract metric: %w", err) + return + } + + cm.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp)) + if !aggregate { + cm.ContractID = types.FileContractID(placeHolderFCID) + cm.HostKey = types.PublicKey(placeHolderHK) + cm.RevisionNumber = uint64(placeHolderRevisionNumber) + } + return + } + + // if a host filter is set, query periods + if opts.ContractID != (types.FileContractID{}) || opts.HostKey != (types.PublicKey{}) { + return queryPeriods(ctx, tx, start, n, interval, opts.ContractMetricsQueryOpts, func(rows *sql.LoggedRows) (api.ContractMetric, error) { + return scanContractMetric(rows, false) + }) + } + + // otherwise we return the aggregated metrics for each period + return queryAggregatedPeriods(ctx, tx, start, n, interval, opts.IndexHint, func(rows *sql.LoggedRows) (api.ContractMetric, error) { + return scanContractMetric(rows, true) + }) +} + +func ContractPruneMetrics(ctx context.Context, tx 
sql.Tx, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) ([]api.ContractPruneMetric, error) { + return queryPeriods(ctx, tx, start, n, interval, opts, func(rows *sql.LoggedRows) (m api.ContractPruneMetric, err error) { + var placeHolder int64 + var placeHolderTime time.Time + var timestamp UnixTimeMS + err = rows.Scan( + &placeHolder, + &placeHolderTime, + ×tamp, + (*FileContractID)(&m.ContractID), + (*PublicKey)(&m.HostKey), + &m.HostVersion, + (*Unsigned64)(&m.Pruned), + (*Unsigned64)(&m.Remaining), + &m.Duration, + ) + if err != nil { + err = fmt.Errorf("failed to scan contract prune metric: %w", err) + return + } + m.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp)) + return + }) +} + +func ContractSetChurnMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) ([]api.ContractSetChurnMetric, error) { + return queryPeriods(ctx, tx, start, n, interval, opts, func(rows *sql.LoggedRows) (m api.ContractSetChurnMetric, err error) { + var placeHolder int64 + var placeHolderTime time.Time + var timestamp UnixTimeMS + err = rows.Scan( + &placeHolder, + &placeHolderTime, + ×tamp, + &m.Name, + (*FileContractID)(&m.ContractID), + &m.Direction, + &m.Reason, + ) + if err != nil { + err = fmt.Errorf("failed to scan contract set churn metric: %w", err) + return + } + m.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp)) + return + }) +} + +func ContractSetMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) ([]api.ContractSetMetric, error) { + return queryPeriods(ctx, tx, start, n, interval, opts, func(rows *sql.LoggedRows) (m api.ContractSetMetric, err error) { + var placeHolder int64 + var placeHolderTime time.Time + var timestamp UnixTimeMS + err = rows.Scan( + &placeHolder, + &placeHolderTime, + ×tamp, + &m.Name, + &m.Contracts, + 
)
+		if err != nil {
+			err = fmt.Errorf("failed to scan contract set metric: %w", err)
+			return
+		}
+		m.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp))
+		return
+	})
+}
+
+// PerformanceMetrics returns performance metrics for the given time range,
+// bucketed into n intervals of the given length.
+func PerformanceMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) {
+	return queryPeriods(ctx, tx, start, n, interval, opts, func(rows *sql.LoggedRows) (m api.PerformanceMetric, err error) {
+		var placeHolder int64
+		var placeHolderTime time.Time
+		var timestamp UnixTimeMS
+		err = rows.Scan(
+			&placeHolder,
+			&placeHolderTime,
+			&timestamp,
+			&m.Action,
+			(*PublicKey)(&m.HostKey),
+			&m.Origin,
+			&m.Duration,
+		)
+		if err != nil {
+			err = fmt.Errorf("failed to scan performance metric: %w", err)
+			return
+		}
+		m.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp))
+		return
+	})
+}
+
+// PruneMetrics deletes all metrics of the given type recorded before the
+// given cutoff time.
+func PruneMetrics(ctx context.Context, tx sql.Tx, metric string, cutoff time.Time) error {
+	if metric == "" {
+		return errors.New("metric must be set")
+	} else if cutoff.IsZero() {
+		return errors.New("cutoff time must be set")
+	}
+
+	var table string
+	switch metric {
+	case api.MetricContractPrune:
+		table = "contract_prunes"
+	case api.MetricContractSet:
+		table = "contract_sets"
+	case api.MetricContractSetChurn:
+		table = "contract_sets_churn"
+	case api.MetricContract:
+		table = "contracts"
+	case api.MetricPerformance:
+		table = "performance"
+	case api.MetricWallet:
+		table = "wallets"
+	default:
+		return fmt.Errorf("unknown metric '%s'", metric)
+	}
+	_, err := tx.Exec(ctx, fmt.Sprintf("DELETE FROM %s WHERE timestamp < ?", table), UnixTimeMS(cutoff))
+	return err
+}
+
+// RecordContractMetric inserts the given contract metrics, replacing any
+// metric recorded for the same contract within the same 5 minute window.
+func RecordContractMetric(ctx context.Context, tx sql.Tx, metrics ...api.ContractMetric) error {
+	insertStmt, err := tx.Prepare(ctx, "INSERT INTO contracts (created_at, timestamp, fcid, host, remaining_collateral_lo, remaining_collateral_hi, remaining_funds_lo, remaining_funds_hi, 
revision_number, upload_spending_lo, upload_spending_hi, download_spending_lo, download_spending_hi, fund_account_spending_lo, fund_account_spending_hi, delete_spending_lo, delete_spending_hi, list_spending_lo, list_spending_hi) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+	if err != nil {
+		return fmt.Errorf("failed to prepare statement to insert contract metric: %w", err)
+	}
+	defer insertStmt.Close()
+
+	deleteStmt, err := tx.Prepare(ctx, "DELETE FROM contracts WHERE fcid = ? AND timestamp >= ? AND timestamp < ?")
+	if err != nil {
+		return fmt.Errorf("failed to prepare statement to delete contract metric: %w", err)
+	}
+	defer deleteStmt.Close()
+
+	for _, metric := range metrics {
+		// delete any existing metric for the same contract recorded within the
+		// same 5' window by truncating the timestamp to the metric granularity
+		intervalStart := metric.Timestamp.Std().Truncate(contractMetricGranularity)
+		intervalEnd := intervalStart.Add(contractMetricGranularity)
+		if _, err := deleteStmt.Exec(ctx,
+			FileContractID(metric.ContractID),
+			UnixTimeMS(intervalStart),
+			UnixTimeMS(intervalEnd),
+		); err != nil {
+			return fmt.Errorf("failed to delete existing contract metric: %w", err)
+		}
+
+		res, err := insertStmt.Exec(ctx,
+			time.Now().UTC(),
+			UnixTimeMS(metric.Timestamp),
+			FileContractID(metric.ContractID),
+			PublicKey(metric.HostKey),
+			Unsigned64(metric.RemainingCollateral.Lo),
+			Unsigned64(metric.RemainingCollateral.Hi),
+			Unsigned64(metric.RemainingFunds.Lo),
+			Unsigned64(metric.RemainingFunds.Hi),
+			Unsigned64(metric.RevisionNumber),
+			Unsigned64(metric.UploadSpending.Lo),
+			Unsigned64(metric.UploadSpending.Hi),
+			Unsigned64(metric.DownloadSpending.Lo),
+			Unsigned64(metric.DownloadSpending.Hi),
+			Unsigned64(metric.FundAccountSpending.Lo),
+			Unsigned64(metric.FundAccountSpending.Hi),
+			Unsigned64(metric.DeleteSpending.Lo),
+			Unsigned64(metric.DeleteSpending.Hi),
+			Unsigned64(metric.ListSpending.Lo),
+			Unsigned64(metric.ListSpending.Hi),
+		)
+		if err != nil {
+			return fmt.Errorf("failed to insert contract metric: %w", err)
+		} else if n,
err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert contract metric: no rows affected") + } + } + + return nil +} + +func RecordContractPruneMetric(ctx context.Context, tx sql.Tx, metrics ...api.ContractPruneMetric) error { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO contract_prunes (created_at, timestamp, fcid, host, host_version, pruned, remaining, duration) VALUES (?, ?,?, ?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare statement to insert contract prune metric: %w", err) + } + defer insertStmt.Close() + + for _, metric := range metrics { + res, err := insertStmt.Exec(ctx, + time.Now().UTC(), + UnixTimeMS(metric.Timestamp), + FileContractID(metric.ContractID), + PublicKey(metric.HostKey), + metric.HostVersion, + Unsigned64(metric.Pruned), + Unsigned64(metric.Remaining), + metric.Duration, + ) + if err != nil { + return fmt.Errorf("failed to insert contract prune metric: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert contract prune metric: no rows affected") + } + } + + return nil +} + +func RecordContractSetChurnMetric(ctx context.Context, tx sql.Tx, metrics ...api.ContractSetChurnMetric) error { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO contract_sets_churn (created_at, timestamp, name, fc_id, direction, reason) VALUES (?, ?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare statement to insert contract set churn metric: %w", err) + } + defer insertStmt.Close() + + for _, metric := range metrics { + res, err := insertStmt.Exec(ctx, + time.Now().UTC(), + UnixTimeMS(metric.Timestamp), + metric.Name, + FileContractID(metric.ContractID), + metric.Direction, + metric.Reason, + ) + if err != nil { + return fmt.Errorf("failed to insert contract set churn metric: %w", 
err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert contract set churn metric: no rows affected") + } + } + + return nil +} + +func RecordContractSetMetric(ctx context.Context, tx sql.Tx, metrics ...api.ContractSetMetric) error { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO contract_sets (created_at, timestamp, name, contracts) VALUES (?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare statement to insert contract set metric: %w", err) + } + defer insertStmt.Close() + + for _, metric := range metrics { + res, err := insertStmt.Exec(ctx, + time.Now().UTC(), + UnixTimeMS(metric.Timestamp), + metric.Name, + metric.Contracts, + ) + if err != nil { + return fmt.Errorf("failed to insert contract set metric: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert contract set metric: no rows affected") + } + } + + return nil +} + +func RecordPerformanceMetric(ctx context.Context, tx sql.Tx, metrics ...api.PerformanceMetric) error { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO performance (created_at, timestamp, action, host, origin, duration) VALUES (?, ?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare statement to insert performance metric: %w", err) + } + defer insertStmt.Close() + + for _, metric := range metrics { + res, err := insertStmt.Exec(ctx, + time.Now().UTC(), + UnixTimeMS(metric.Timestamp), + metric.Action, + PublicKey(metric.HostKey), + metric.Origin, + metric.Duration, + ) + if err != nil { + return fmt.Errorf("failed to insert performance metric: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert performance metric: no 
rows affected") + } + } + + return nil +} + +func RecordWalletMetric(ctx context.Context, tx sql.Tx, metrics ...api.WalletMetric) error { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO wallets (created_at, timestamp, confirmed_lo, confirmed_hi, spendable_lo, spendable_hi, unconfirmed_lo, unconfirmed_hi) VALUES (?, ?, ?, ?, ?, ?, ?, ?)") + if err != nil { + return fmt.Errorf("failed to prepare statement to insert wallet metric: %w", err) + } + defer insertStmt.Close() + + for _, metric := range metrics { + res, err := insertStmt.Exec(ctx, + time.Now().UTC(), + UnixTimeMS(metric.Timestamp), + Unsigned64(metric.Confirmed.Lo), + Unsigned64(metric.Confirmed.Hi), + Unsigned64(metric.Spendable.Lo), + Unsigned64(metric.Spendable.Hi), + Unsigned64(metric.Unconfirmed.Lo), + Unsigned64(metric.Unconfirmed.Hi), + ) + if err != nil { + return fmt.Errorf("failed to insert wallet metric: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert wallet metric: no rows affected") + } + } + + return nil +} + +func WalletMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) ([]api.WalletMetric, error) { + return queryPeriods(ctx, tx, start, n, interval, opts, func(rows *sql.LoggedRows) (m api.WalletMetric, err error) { + var placeHolder int64 + var placeHolderTime time.Time + var timestamp UnixTimeMS + err = rows.Scan( + &placeHolder, + &placeHolderTime, + ×tamp, + (*Unsigned64)(&m.Confirmed.Lo), (*Unsigned64)(&m.Confirmed.Hi), + (*Unsigned64)(&m.Spendable.Lo), (*Unsigned64)(&m.Spendable.Hi), + (*Unsigned64)(&m.Unconfirmed.Lo), (*Unsigned64)(&m.Unconfirmed.Hi), + ) + if err != nil { + err = fmt.Errorf("failed to scan contract set metric: %w", err) + return + } + m.Timestamp = api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp)) + return + }) +} + +func queryPeriods[T any](ctx 
context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts interface{}, scanRowFn func(*sql.LoggedRows) (T, error)) ([]T, error) { + if n > api.MetricMaxIntervals { + return nil, api.ErrMaxIntervalsExceeded + } + params := []interface{}{ + UnixTimeMS(start), + interval.Milliseconds(), + UnixTimeMS(start.Add(time.Duration(n) * interval)), + interval.Milliseconds(), + interval.Milliseconds(), + } + + query := "1=1" + var table string + switch opts := opts.(type) { + case api.ContractMetricsQueryOpts: + table = "contracts" + if opts.ContractID != (types.FileContractID{}) { + query += " AND fcid = ?" + params = append(params, FileContractID(opts.ContractID)) + } + if opts.HostKey != (types.PublicKey{}) { + query += " AND host = ?" + params = append(params, PublicKey(opts.HostKey)) + } + case api.ContractPruneMetricsQueryOpts: + table = "contract_prunes" + if opts.ContractID != (types.FileContractID{}) { + query += " AND fcid = ?" + params = append(params, FileContractID(opts.ContractID)) + } + if opts.HostKey != (types.PublicKey{}) { + query += " AND host = ?" + params = append(params, PublicKey(opts.HostKey)) + } + if opts.HostVersion != "" { + query += " AND host_version = ?" + params = append(params, opts.HostVersion) + } + case api.ContractSetChurnMetricsQueryOpts: + table = "contract_sets_churn" + if opts.Name != "" { + query += " AND name = ?" + params = append(params, opts.Name) + } + if opts.Direction != "" { + query += " AND direction = ?" + params = append(params, opts.Direction) + } + if opts.Reason != "" { + query += " AND reason = ?" + params = append(params, opts.Reason) + } + case api.ContractSetMetricsQueryOpts: + table = "contract_sets" + if opts.Name != "" { + query += " AND name = ?" + params = append(params, opts.Name) + } + case api.PerformanceMetricsQueryOpts: + table = "performance" + if opts.Action != "" { + query += " AND action = ?" 
+ params = append(params, opts.Action) + } + if opts.HostKey != (types.PublicKey{}) { + query += " AND host = ?" + params = append(params, PublicKey(opts.HostKey)) + } + if opts.Origin != "" { + query += " AND origin = ?" + params = append(params, opts.Origin) + } + case api.WalletMetricsQueryOpts: + table = "wallets" + default: + return nil, fmt.Errorf("unknown query opts type: %T", opts) + } + + rows, err := tx.Query(ctx, fmt.Sprintf(` + WITH RECURSIVE periods AS ( + SELECT ? AS period_start + UNION ALL + SELECT period_start + ? + FROM periods + WHERE period_start < ? - ? + ) + SELECT %s.* FROM %s + INNER JOIN ( + SELECT + p.period_start as Period, + MIN(obj.id) AS id + FROM + periods p + INNER JOIN + %s obj ON obj.timestamp >= p.period_start AND obj.timestamp < p.period_start + ? + WHERE %s + GROUP BY + p.period_start + ) i ON %s.id = i.id ORDER BY Period ASC + `, table, table, table, query, table), params...) + if err != nil { + return nil, fmt.Errorf("failed to query periods: %w", err) + } + defer rows.Close() + + var result []T + for rows.Next() { + m, err := scanRowFn(rows) + if err != nil { + return nil, fmt.Errorf("failed to scan row: %w", err) + } + result = append(result, m) + } + return result, nil +} + +func queryAggregatedPeriods(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, indexHint string, scanRowFn func(int64 *sql.LoggedRows) (api.ContractMetric, error)) ([]api.ContractMetric, error) { + if n > api.MetricMaxIntervals { + return nil, api.ErrMaxIntervalsExceeded + } + end := start.Add(time.Duration(n) * interval) + + // fetch distinct contract ids + rows, err := tx.Query(ctx, + "SELECT DISTINCT fcid FROM contracts WHERE contracts.timestamp >= ? 
AND contracts.timestamp < ?", + UnixTimeMS(start), + UnixTimeMS(end), + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch distinct contract ids: %w", err) + } + defer rows.Close() + + var fcids []FileContractID + for rows.Next() { + var fcid FileContractID + if err := rows.Scan(&fcid); err != nil { + return nil, fmt.Errorf("failed to scan contract id: %w", err) + } + fcids = append(fcids, fcid) + } + + // prepare statement to fetch contract metrics + queryStmt, err := tx.Prepare(ctx, fmt.Sprintf("SELECT * FROM contracts %s WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", indexHint)) + if err != nil { + return nil, fmt.Errorf("failed to prepare statement to fetch contract metrics: %w", err) + } + defer queryStmt.Close() + + var result []api.ContractMetric + currentPeriod := int64(math.MinInt64) + for intervalStart := start; intervalStart.Before(end); intervalStart = intervalStart.Add(interval) { + intervalEnd := intervalStart.Add(interval) + period := intervalStart.UnixMilli() + for _, fcid := range fcids { + rows, err := queryStmt.Query(ctx, UnixTimeMS(intervalStart), UnixTimeMS(intervalEnd), FileContractID(fcid)) + if err != nil { + return nil, fmt.Errorf("failed to fetch contract metrics: %w", err) + } + for rows.Next() { + m, err := scanRowFn(rows) + if err != nil { + rows.Close() + return nil, fmt.Errorf("failed to scan metric: %w", err) + } + + if period != currentPeriod { + result = append(result, m) + currentPeriod = period + } else { + result[len(result)-1] = aggregateMetrics(result[len(result)-1], m) + } + } + rows.Close() + } + } + + return result, nil +} + +func aggregateMetrics(x, y api.ContractMetric) (out api.ContractMetric) { + out = x + out.RemainingCollateral, _ = out.RemainingCollateral.AddWithOverflow(y.RemainingCollateral) + out.RemainingFunds, _ = out.RemainingFunds.AddWithOverflow(y.RemainingFunds) + out.UploadSpending, _ = out.UploadSpending.AddWithOverflow(y.UploadSpending) + 
out.DownloadSpending, _ = out.DownloadSpending.AddWithOverflow(y.DownloadSpending) + out.FundAccountSpending, _ = out.FundAccountSpending.AddWithOverflow(y.FundAccountSpending) + out.DeleteSpending, _ = out.DeleteSpending.AddWithOverflow(y.DeleteSpending) + out.ListSpending, _ = out.ListSpending.AddWithOverflow(y.ListSpending) + return +} + +func normaliseTimestamp(start time.Time, interval time.Duration, t UnixTimeMS) UnixTimeMS { + startMS := start.UnixMilli() + toNormaliseMS := time.Time(t).UnixMilli() + intervalMS := interval.Milliseconds() + if startMS > toNormaliseMS { + return UnixTimeMS(start) + } + normalizedMS := (toNormaliseMS-startMS)/intervalMS*intervalMS + start.UnixMilli() + return UnixTimeMS(time.UnixMilli(normalizedMS)) +} diff --git a/stores/sql/mysql/common.go b/stores/sql/mysql/common.go new file mode 100644 index 000000000..fca2749e7 --- /dev/null +++ b/stores/sql/mysql/common.go @@ -0,0 +1,47 @@ +package mysql + +import ( + "context" + dsql "database/sql" + "embed" + "fmt" + + "go.sia.tech/renterd/internal/sql" +) + +var deadlockMsgs = []string{ + "Deadlock found when trying to get lock", +} + +func Open(user, password, addr, dbName string) (*dsql.DB, error) { + return dsql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&multiStatements=true", user, password, addr, dbName)) +} + +//go:embed all:migrations/* +var migrationsFs embed.FS + +func applyMigration(ctx context.Context, db *sql.DB, fn func(tx sql.Tx) (bool, error)) error { + return db.Transaction(ctx, func(tx sql.Tx) error { + _, err := fn(tx) + return err + }) +} + +func createMigrationTable(ctx context.Context, db *sql.DB) error { + if _, err := db.Exec(ctx, ` + CREATE TABLE IF NOT EXISTS migrations ( + id varchar(255) NOT NULL, + PRIMARY KEY (id) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;`); err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) + } + return nil +} + +func version(ctx 
context.Context, db *sql.DB) (string, string, error) { + var version string + if err := db.QueryRow(ctx, "select version()").Scan(&version); err != nil { + return "", "", err + } + return "MySQL", version, nil +} diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go new file mode 100644 index 000000000..88ce3cb83 --- /dev/null +++ b/stores/sql/mysql/main.go @@ -0,0 +1,1213 @@ +package mysql + +import ( + "context" + dsql "database/sql" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + "unicode/utf8" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/object" + ssql "go.sia.tech/renterd/stores/sql" + "go.sia.tech/renterd/webhooks" + "go.sia.tech/siad/modules" + "lukechampine.com/frand" + + "go.sia.tech/renterd/internal/sql" + + "go.uber.org/zap" +) + +type ( + MainDatabase struct { + db *sql.DB + log *zap.SugaredLogger + } + + MainDatabaseTx struct { + sql.Tx + log *zap.SugaredLogger + } +) + +// NewMainDatabase creates a new MySQL backend. 
func NewMainDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration) (*MainDatabase, error) {
	// lqd/ltd are duration thresholds passed through to sql.NewDB —
	// presumably long-query and long-transaction logging thresholds; confirm
	// against sql.NewDB.
	store, err := sql.NewDB(db, log.Desugar(), deadlockMsgs, lqd, ltd)
	return &MainDatabase{
		db:  store,
		log: log,
	}, err
}

// ApplyMigration applies a single migration step via the package-level
// applyMigration helper.
func (b *MainDatabase) ApplyMigration(ctx context.Context, fn func(tx sql.Tx) (bool, error)) error {
	return applyMigration(ctx, b.db, fn)
}

// Close closes the underlying database handle.
func (b *MainDatabase) Close() error {
	return b.db.Close()
}

// CreateMigrationTable ensures the migrations bookkeeping table exists.
func (b *MainDatabase) CreateMigrationTable(ctx context.Context) error {
	return createMigrationTable(ctx, b.db)
}

// DB exposes the wrapped *sql.DB.
func (b *MainDatabase) DB() *sql.DB {
	return b.db
}

// LoadSlabBuffers delegates to ssql.LoadSlabBuffers.
func (b *MainDatabase) LoadSlabBuffers(ctx context.Context) ([]ssql.LoadedSlabBuffer, []string, error) {
	return ssql.LoadSlabBuffers(ctx, b.db)
}

// MakeDirsForPath creates the directory hierarchy for path within the given
// transaction and returns the id of the deepest directory.
func (b *MainDatabase) MakeDirsForPath(ctx context.Context, tx sql.Tx, path string) (int64, error) {
	mtx := b.wrapTxn(tx)
	return mtx.MakeDirsForPath(ctx, path)
}

// Migrate runs all pending 'main' migrations.
func (b *MainDatabase) Migrate(ctx context.Context) error {
	return sql.PerformMigrations(ctx, b, migrationsFs, "main", sql.MainMigrations(ctx, b, migrationsFs, b.log))
}

// Transaction runs fn inside a database transaction, handing it a
// MySQL-specific DatabaseTx wrapper.
func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.DatabaseTx) error) error {
	return b.db.Transaction(ctx, func(tx sql.Tx) error {
		return fn(b.wrapTxn(tx))
	})
}

// Version reports the database type and server version.
func (b *MainDatabase) Version(ctx context.Context) (string, string, error) {
	return version(ctx, b.db)
}

// wrapTxn wraps a sql.Tx in a MainDatabaseTx with a uniquely-named logger so
// log lines of concurrent transactions can be told apart.
func (b *MainDatabase) wrapTxn(tx sql.Tx) *MainDatabaseTx {
	return &MainDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))}
}

// Accounts delegates to ssql.Accounts.
func (tx *MainDatabaseTx) Accounts(ctx context.Context) ([]api.Account, error) {
	return ssql.Accounts(ctx, tx)
}

// AddMultipartPart registers a finished part of a multipart upload, replacing
// any previously uploaded part with the same part number, and links the
// part's slabs to the given contract set.
func (tx *MainDatabaseTx) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error {
	// fetch contract set
	var csID int64
	err := tx.QueryRow(ctx, "SELECT id FROM contract_sets WHERE name = ?", contractSet).
		Scan(&csID)
	if errors.Is(err, dsql.ErrNoRows) {
		return api.ErrContractSetNotFound
	} else if err != nil {
		return fmt.Errorf("failed to fetch contract set id: %w", err)
	}

	// find multipart upload
	var muID int64
	err = tx.QueryRow(ctx, "SELECT id FROM multipart_uploads WHERE upload_id = ?", uploadID).
		Scan(&muID)
	if err != nil {
		return fmt.Errorf("failed to fetch multipart upload: %w", err)
	}

	// delete a potentially existing part
	_, err = tx.Exec(ctx, "DELETE FROM multipart_parts WHERE db_multipart_upload_id = ? AND part_number = ?",
		muID, partNumber)
	if err != nil {
		return fmt.Errorf("failed to delete existing part: %w", err)
	}

	// insert new part; the part size is the sum of its slice lengths
	var size uint64
	for _, slice := range slices {
		size += uint64(slice.Length)
	}
	var partID int64
	res, err := tx.Exec(ctx, "INSERT INTO multipart_parts (created_at, etag, part_number, size, db_multipart_upload_id) VALUES (?, ?, ?, ?, ?)",
		time.Now(), eTag, partNumber, size, muID)
	if err != nil {
		return fmt.Errorf("failed to insert part: %w", err)
	} else if partID, err = res.LastInsertId(); err != nil {
		return fmt.Errorf("failed to fetch part id: %w", err)
	}

	// create slices attached to the part (not to an object yet)
	return tx.insertSlabs(ctx, nil, &partID, contractSet, slices)
}

// AbortMultipartUpload delegates to ssql.AbortMultipartUpload.
func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error {
	return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID)
}

// AddWebhook upserts a webhook; (module, event, url) is presumably the unique
// key, with only the headers updated on conflict.
func (tx *MainDatabaseTx) AddWebhook(ctx context.Context, wh webhooks.Webhook) error {
	// default to an empty JSON object so the column is always valid JSON
	headers := "{}"
	if len(wh.Headers) > 0 {
		h, err := json.Marshal(wh.Headers)
		if err != nil {
			return fmt.Errorf("failed to marshal headers: %w", err)
		}
		headers = string(h)
	}
	_, err := tx.Exec(ctx, "INSERT INTO webhooks (created_at, module, event, url, headers) VALUES (?, ?, ?, ?, ?) ON DUPLICATE KEY UPDATE headers = VALUES(headers)",
		time.Now(), wh.Module, wh.Event, wh.URL, headers)
	if err != nil {
		return fmt.Errorf("failed to insert webhook: %w", err)
	}
	return nil
}
// AncestorContracts delegates to ssql.AncestorContracts.
func (tx *MainDatabaseTx) AncestorContracts(ctx context.Context, fcid types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) {
	return ssql.AncestorContracts(ctx, tx, fcid, startHeight)
}

// ArchiveContract delegates to ssql.ArchiveContract.
func (tx *MainDatabaseTx) ArchiveContract(ctx context.Context, fcid types.FileContractID, reason string) error {
	return ssql.ArchiveContract(ctx, tx, fcid, reason)
}

// Autopilot delegates to ssql.Autopilot.
func (tx *MainDatabaseTx) Autopilot(ctx context.Context, id string) (api.Autopilot, error) {
	return ssql.Autopilot(ctx, tx, id)
}

// Autopilots delegates to ssql.Autopilots.
func (tx *MainDatabaseTx) Autopilots(ctx context.Context) ([]api.Autopilot, error) {
	return ssql.Autopilots(ctx, tx)
}

// Bucket delegates to ssql.Bucket.
func (tx *MainDatabaseTx) Bucket(ctx context.Context, bucket string) (api.Bucket, error) {
	return ssql.Bucket(ctx, tx, bucket)
}

// CompleteMultipartUpload turns a finished multipart upload into a regular
// object: it creates the object row, re-parents the parts' slices and the
// upload's metadata to the new object, then deletes the upload. Returns the
// object's eTag.
func (tx *MainDatabaseTx) CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (string, error) {
	mpu, neededParts, size, eTag, err := ssql.MultipartUploadForCompletion(ctx, tx, bucket, key, uploadID, parts)
	if err != nil {
		return "", fmt.Errorf("failed to fetch multipart upload: %w", err)
	}

	// create the directory.
	dirID, err := tx.MakeDirsForPath(ctx, key)
	if err != nil {
		return "", fmt.Errorf("failed to create directory for key %s: %w", key, err)
	}

	// create the object
	objID, err := ssql.InsertObject(ctx, tx, key, dirID, mpu.BucketID, size, mpu.EC, mpu.MimeType, eTag)
	if err != nil {
		return "", fmt.Errorf("failed to insert object: %w", err)
	}

	// move each part's slices onto the object; object_index is shifted by the
	// number of slices already attached so slice order across parts is kept
	updateSlicesStmt, err := tx.Prepare(ctx, `
		UPDATE slices s
		INNER JOIN multipart_parts mpp ON s.db_multipart_part_id = mpp.id
		SET s.db_object_id = ?,
			s.db_multipart_part_id = NULL,
			s.object_index = s.object_index + ?
		WHERE mpp.id = ?
	`)
	if err != nil {
		return "", fmt.Errorf("failed to prepare statement to update slices: %w", err)
	}
	defer updateSlicesStmt.Close()

	var updatedSlices int64
	for _, part := range neededParts {
		res, err := updateSlicesStmt.Exec(ctx, objID, updatedSlices, part.ID)
		if err != nil {
			return "", fmt.Errorf("failed to update slices: %w", err)
		}
		n, err := res.RowsAffected()
		if err != nil {
			return "", fmt.Errorf("failed to get rows affected: %w", err)
		}
		updatedSlices += n
	}

	// create/update metadata: insert metadata passed via opts, then re-parent
	// metadata that was attached to the upload itself
	if err := ssql.InsertMetadata(ctx, tx, &objID, nil, opts.Metadata); err != nil {
		return "", fmt.Errorf("failed to insert object metadata: %w", err)
	}
	_, err = tx.Exec(ctx, "UPDATE object_user_metadata SET db_multipart_upload_id = NULL, db_object_id = ? WHERE db_multipart_upload_id = ?",
		objID, mpu.ID)
	if err != nil {
		return "", fmt.Errorf("failed to update object metadata: %w", err)
	}

	// delete the multipart upload
	if _, err := tx.Exec(ctx, "DELETE FROM multipart_uploads WHERE id = ?", mpu.ID); err != nil {
		return "", fmt.Errorf("failed to delete multipart upload: %w", err)
	}

	return eTag, nil
}

// ContractRoots delegates to ssql.ContractRoots.
func (tx *MainDatabaseTx) ContractRoots(ctx context.Context, fcid types.FileContractID) ([]types.Hash256, error) {
	return ssql.ContractRoots(ctx, tx, fcid)
}

// Contracts delegates to ssql.Contracts.
func (tx *MainDatabaseTx) Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) {
	return ssql.Contracts(ctx, tx, opts)
}

// ContractSets delegates to ssql.ContractSets.
func (tx *MainDatabaseTx) ContractSets(ctx context.Context) ([]string, error) {
	return ssql.ContractSets(ctx, tx)
}

// ContractSize delegates to ssql.ContractSize.
func (tx *MainDatabaseTx) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) {
	return ssql.ContractSize(ctx, tx, id)
}

// ContractSizes delegates to ssql.ContractSizes.
func (tx *MainDatabaseTx) ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error) {
	return ssql.ContractSizes(ctx, tx)
}

// CopyObject delegates to ssql.CopyObject.
func (tx *MainDatabaseTx) CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) {
	return ssql.CopyObject(ctx, tx, srcBucket, dstBucket, srcKey, dstKey, mimeType, metadata)
}

// CreateBucket creates a new bucket with the given policy. Returns
// api.ErrBucketExists if a bucket with that name already exists. The no-op
// 'id = id' upsert makes a duplicate insert affect 0 rows instead of erroring.
func (tx *MainDatabaseTx) CreateBucket(ctx context.Context, bucket string, bp api.BucketPolicy) error {
	policy, err := json.Marshal(bp)
	if err != nil {
		return err
	}
	res, err := tx.Exec(ctx, "INSERT INTO buckets (created_at, name, policy) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE id = id",
		time.Now(), bucket, policy)
	if err != nil {
		return fmt.Errorf("failed to create bucket: %w", err)
	} else if n, err := res.RowsAffected(); err != nil {
		return fmt.Errorf("failed to get rows affected: %w", err)
	} else if n == 0 {
		return api.ErrBucketExists
	}
	return nil
}
// DeleteHostSector delegates to ssql.DeleteHostSector.
func (tx *MainDatabaseTx) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) {
	return ssql.DeleteHostSector(ctx, tx, hk, root)
}

// InsertBufferedSlab delegates to ssql.InsertBufferedSlab.
func (tx *MainDatabaseTx) InsertBufferedSlab(ctx context.Context, fileName string, contractSetID int64, ec object.EncryptionKey, minShards, totalShards uint8) (int64, error) {
	return ssql.InsertBufferedSlab(ctx, tx, fileName, contractSetID, ec, minShards, totalShards)
}

// InsertMultipartUpload delegates to ssql.InsertMultipartUpload.
func (tx *MainDatabaseTx) InsertMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) {
	return ssql.InsertMultipartUpload(ctx, tx, bucket, key, ec, mimeType, metadata)
}

// DeleteSettings delegates to ssql.DeleteSettings.
func (tx *MainDatabaseTx) DeleteSettings(ctx context.Context, key string) error {
	return ssql.DeleteSettings(ctx, tx, key)
}

// DeleteWebhook delegates to ssql.DeleteWebhook.
func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error {
	return ssql.DeleteWebhook(ctx, tx, wh)
}

// DeleteBucket delegates to ssql.DeleteBucket.
func (tx *MainDatabaseTx) DeleteBucket(ctx context.Context, bucket string) error {
	return ssql.DeleteBucket(ctx, tx, bucket)
}

// DeleteObject deletes the object with the given key from bucket. It reports
// whether an object was actually deleted.
func (tx *MainDatabaseTx) DeleteObject(ctx context.Context, bucket string, key string) (bool, error) {
	// check if the object exists first to avoid unnecessary locking for the
	// common case
	var objID uint
	err := tx.QueryRow(ctx, "SELECT id FROM objects WHERE object_id = ? AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", key, bucket).Scan(&objID)
	if errors.Is(err, dsql.ErrNoRows) {
		return false, nil
	} else if err != nil {
		return false, err
	}

	resp, err := tx.Exec(ctx, "DELETE FROM objects WHERE id = ?", objID)
	if err != nil {
		return false, err
	} else if n, err := resp.RowsAffected(); err != nil {
		return false, err
	} else {
		return n != 0, nil
	}
}

// DeleteObjects deletes up to limit objects whose keys start with key from
// bucket. It reports whether any object was deleted; callers presumably loop
// until it returns false.
func (tx *MainDatabaseTx) DeleteObjects(ctx context.Context, bucket string, key string, limit int64) (bool, error) {
	// MySQL doesn't allow LIMIT on multi-table DELETE, hence the derived
	// 'limited' table.
	resp, err := tx.Exec(ctx, `
	DELETE o
	FROM objects o
	JOIN (
		SELECT id
		FROM objects
		WHERE object_id LIKE ? AND db_bucket_id = (
			SELECT id FROM buckets WHERE buckets.name = ?
		)
		LIMIT ?
	) AS limited ON o.id = limited.id`,
		key+"%", bucket, limit)
	if err != nil {
		return false, err
	} else if n, err := resp.RowsAffected(); err != nil {
		return false, err
	} else {
		return n != 0, nil
	}
}

// HostAllowlist delegates to ssql.HostAllowlist.
func (tx *MainDatabaseTx) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) {
	return ssql.HostAllowlist(ctx, tx)
}

// HostBlocklist delegates to ssql.HostBlocklist.
func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) {
	return ssql.HostBlocklist(ctx, tx)
}

// HostsForScanning delegates to ssql.HostsForScanning.
func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) {
	return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit)
}

// InitConsensusInfo delegates to ssql.InitConsensusInfo.
func (tx *MainDatabaseTx) InitConsensusInfo(ctx context.Context) (types.ChainIndex, modules.ConsensusChangeID, error) {
	return ssql.InitConsensusInfo(ctx, tx)
}

// InsertObject inserts a fully uploaded object with its slabs and metadata
// into the given bucket.
func (tx *MainDatabaseTx) InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error {
	// get bucket id
	var bucketID int64
	err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE buckets.name = ?", bucket).Scan(&bucketID)
	if errors.Is(err, dsql.ErrNoRows) {
		return api.ErrBucketNotFound
	} else if err != nil {
		return fmt.Errorf("failed to fetch bucket id: %w", err)
	}

	// insert object
	objKey, err := o.Key.MarshalBinary()
	if err != nil {
		return fmt.Errorf("failed to marshal object key: %w", err)
	}
	objID, err := ssql.InsertObject(ctx, tx, key, dirID, bucketID, o.TotalSize(), objKey, mimeType, eTag)
	if err != nil {
		return fmt.Errorf("failed to insert object: %w", err)
	}

	// insert slabs
	if err := tx.insertSlabs(ctx, &objID, nil, contractSet, o.Slabs); err != nil {
		return fmt.Errorf("failed to insert slabs: %w", err)
	}

	// insert metadata
	if err := ssql.InsertMetadata(ctx, tx, &objID, nil, md); err != nil {
		return fmt.Errorf("failed to insert object metadata: %w", err)
	}
	return nil
}

// InvalidateSlabHealthByFCID marks up to limit slabs stored on any of the
// given contracts as needing a health recomputation by zeroing
// health_valid_until. Returns the number of slabs invalidated.
func (tx *MainDatabaseTx) InvalidateSlabHealthByFCID(ctx context.Context, fcids []types.FileContractID, limit int64) (int64, error) {
	if len(fcids) == 0 {
		return 0, nil
	}
	// prepare args: fcids, then the cutoff timestamp, then the limit
	var args []any
	for _, fcid := range fcids {
		args = append(args, ssql.FileContractID(fcid))
	}
	args = append(args, time.Now().Unix())
	args = append(args, limit)
	// the inner derived table works around MySQL's restriction on selecting
	// from the table being updated
	res, err := tx.Exec(ctx, fmt.Sprintf(`
		UPDATE slabs SET health_valid_until = 0 WHERE id in (
			SELECT *
			FROM (
				SELECT slabs.id
				FROM slabs
				INNER JOIN sectors se ON se.db_slab_id = slabs.id
				INNER JOIN contract_sectors cs ON cs.db_sector_id = se.id
				INNER JOIN contracts c ON c.id = cs.db_contract_id
				WHERE c.fcid IN (%s) AND slabs.health_valid_until >= ?
				LIMIT ?
			) slab_ids
		)
	`, strings.Repeat("?, ", len(fcids)-1)+"?"), args...)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
+ if err != nil { + return 0, err + } + return res.RowsAffected() +} + +func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) { + return ssql.ListBuckets(ctx, tx) +} + +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) +} + +func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { + // Create root dir. + dirID := int64(sql.DirectoriesRootID) + if _, err := tx.Exec(ctx, "INSERT IGNORE INTO directories (id, name, db_parent_id) VALUES (?, '/', NULL)", dirID); err != nil { + return 0, fmt.Errorf("failed to create root directory: %w", err) + } + + path = strings.TrimSuffix(path, "/") + if path == "/" { + return dirID, nil + } + + // Create remaining directories. + insertDirStmt, err := tx.Prepare(ctx, "INSERT INTO directories (name, db_parent_id) VALUES (?, ?) 
ON DUPLICATE KEY UPDATE id = last_insert_id(id)") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement to insert dir: %w", err) + } + defer insertDirStmt.Close() + + for i := 0; i < utf8.RuneCountInString(path); i++ { + if path[i] != '/' { + continue + } + dir := path[:i+1] + if dir == "/" { + continue + } + if res, err := insertDirStmt.Exec(ctx, dir, dirID); err != nil { + return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) + } else if dirID, err = res.LastInsertId(); err != nil { + return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } + } + return dirID, nil +} + +func (tx *MainDatabaseTx) MultipartUpload(ctx context.Context, uploadID string) (api.MultipartUpload, error) { + return ssql.MultipartUpload(ctx, tx, uploadID) +} + +func (tx *MainDatabaseTx) MultipartUploadParts(ctx context.Context, bucket, key, uploadID string, marker int, limit int64) (api.MultipartListPartsResponse, error) { + return ssql.MultipartUploadParts(ctx, tx, bucket, key, uploadID, marker, limit) +} + +func (tx *MainDatabaseTx) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, limit int) (api.MultipartListUploadsResponse, error) { + return ssql.MultipartUploads(ctx, tx, bucket, prefix, keyMarker, uploadIDMarker, limit) +} + +func (tx *MainDatabaseTx) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + return ssql.ObjectsStats(ctx, tx, opts) +} + +func (tx *MainDatabaseTx) PruneEmptydirs(ctx context.Context) error { + stmt, err := tx.Prepare(ctx, ` + DELETE + FROM directories + WHERE directories.id != 1 + AND NOT EXISTS (SELECT 1 FROM objects WHERE objects.db_directory_id = directories.id) + AND NOT EXISTS (SELECT 1 FROM (SELECT 1 FROM directories AS d WHERE d.db_parent_id = directories.id) i) + `) + if err != nil { + return err + } + defer stmt.Close() + for { + res, err := stmt.Exec(ctx) + if err != nil { + return err + } else if n, err := 
res.RowsAffected(); err != nil { + return err + } else if n == 0 { + return nil + } + } +} + +func (tx *MainDatabaseTx) PruneSlabs(ctx context.Context, limit int64) (int64, error) { + res, err := tx.Exec(ctx, ` + DELETE FROM slabs + WHERE id IN ( + SELECT id + FROM ( + SELECT slabs.id + FROM slabs + WHERE NOT EXISTS ( + SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id + ) + AND slabs.db_buffered_slab_id IS NULL + LIMIT ? + ) AS limited + )`, limit) + if err != nil { + return 0, err + } + return res.RowsAffected() +} + +func (tx *MainDatabaseTx) RecordHostScans(ctx context.Context, scans []api.HostScan) error { + return ssql.RecordHostScans(ctx, tx, scans) +} + +func (tx *MainDatabaseTx) RecordPriceTables(ctx context.Context, priceTableUpdates []api.HostPriceTableUpdate) error { + return ssql.RecordPriceTables(ctx, tx, priceTableUpdates) +} + +func (tx *MainDatabaseTx) RemoveContractSet(ctx context.Context, contractSet string) error { + return ssql.RemoveContractSet(ctx, tx, contractSet) +} + +func (tx *MainDatabaseTx) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDownTime time.Duration) (int64, error) { + return ssql.RemoveOfflineHosts(ctx, tx, minRecentFailures, maxDownTime) +} + +func (tx *MainDatabaseTx) RenameObject(ctx context.Context, bucket, keyOld, keyNew string, dirID int64, force bool) error { + if force { + // delete potentially existing object at destination + if _, err := tx.DeleteObject(ctx, bucket, keyNew); err != nil { + return fmt.Errorf("RenameObject: failed to delete object: %w", err) + } + } else { + var exists bool + if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM objects WHERE object_id = ? AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?))", keyNew, bucket).Scan(&exists); err != nil { + return err + } else if exists { + return api.ErrObjectExists + } + } + resp, err := tx.Exec(ctx, `UPDATE objects SET object_id = ?, db_directory_id = ? WHERE object_id = ? 
AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)`, keyNew, dirID, keyOld, bucket) + if err != nil { + return err + } else if n, err := resp.RowsAffected(); err != nil { + return err + } else if n == 0 { + return fmt.Errorf("%w: key %v", api.ErrObjectNotFound, keyOld) + } + return nil +} + +func (tx *MainDatabaseTx) RenameObjects(ctx context.Context, bucket, prefixOld, prefixNew string, dirID int64, force bool) error { + if force { + _, err := tx.Exec(ctx, ` + DELETE + FROM objects + WHERE object_id IN ( + SELECT * + FROM ( + SELECT CONCAT(?, SUBSTR(object_id, ?)) + FROM objects + WHERE object_id LIKE ? + AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?) + ) as i + )`, + prefixNew, + utf8.RuneCountInString(prefixOld)+1, + prefixOld+"%", + bucket) + if err != nil { + return err + } + } + resp, err := tx.Exec(ctx, ` + UPDATE objects + SET object_id = CONCAT(?, SUBSTR(object_id, ?)), + db_directory_id = ? + WHERE object_id LIKE ? + AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)`, + prefixNew, utf8.RuneCountInString(prefixOld)+1, + dirID, + prefixOld+"%", + bucket) + if err != nil && strings.Contains(err.Error(), "Duplicate entry") { + return api.ErrObjectExists + } else if err != nil { + return err + } else if n, err := resp.RowsAffected(); err != nil { + return err + } else if n == 0 { + return fmt.Errorf("%w: prefix %v", api.ErrObjectNotFound, prefixOld) + } + return nil +} + +func (tx *MainDatabaseTx) RenewedContract(ctx context.Context, renwedFrom types.FileContractID) (api.ContractMetadata, error) { + return ssql.RenewedContract(ctx, tx, renwedFrom) +} + +func (tx *MainDatabaseTx) ResetConsensusSubscription(ctx context.Context) (types.ChainIndex, error) { + return ssql.ResetConsensusSubscription(ctx, tx) +} + +func (tx *MainDatabaseTx) ResetLostSectors(ctx context.Context, hk types.PublicKey) error { + return ssql.ResetLostSectors(ctx, tx, hk) +} + +func (tx MainDatabaseTx) SaveAccounts(ctx context.Context, 
accounts []api.Account) error { + // clean_shutdown = 1 after save + stmt, err := tx.Prepare(ctx, ` + INSERT INTO ephemeral_accounts (created_at, account_id, clean_shutdown, host, balance, drift, requires_sync) + VAlUES (?, ?, 1, ?, ?, ?, ?) + ON DUPLICATE KEY UPDATE + account_id = VALUES(account_id), + clean_shutdown = 1, + host = VALUES(host), + balance = VALUES(balance), + drift = VALUES(drift), + requires_sync = VALUES(requires_sync) + `) + if err != nil { + return err + } + defer stmt.Close() + + for _, acc := range accounts { + res, err := stmt.Exec(ctx, time.Now(), (ssql.PublicKey)(acc.ID), (ssql.PublicKey)(acc.HostKey), (*ssql.BigInt)(acc.Balance), (*ssql.BigInt)(acc.Drift), acc.RequiresSync) + if err != nil { + return fmt.Errorf("failed to insert account %v: %w", acc.ID, err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n != 1 && n != 2 { // 1 for insert, 2 for update + return fmt.Errorf("expected 1 row affected, got %v", n) + } + } + return nil +} + +func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { + return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) +} + +func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, error) { + return ssql.Setting(ctx, tx, key) +} + +func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { + return ssql.Settings(ctx, tx) +} + +func (tx *MainDatabaseTx) SetUncleanShutdown(ctx context.Context) error { + return ssql.SetUncleanShutdown(ctx, tx) +} + +func (tx *MainDatabaseTx) SlabBuffers(ctx context.Context) (map[string]string, error) { + return ssql.SlabBuffers(ctx, tx) +} + +func (tx *MainDatabaseTx) UpdateAutopilot(ctx context.Context, ap api.Autopilot) error { + res, err := tx.Exec(ctx, ` + INSERT INTO 
autopilots (created_at, identifier, config, current_period) + VALUES (?, ?, ?, ?) + ON DUPLICATE KEY UPDATE + config = VALUES(config), + current_period = VALUES(current_period) + `, time.Now(), ap.ID, (*ssql.AutopilotConfig)(&ap.Config), ap.CurrentPeriod) + if err != nil { + return err + } else if n, err := res.RowsAffected(); err != nil { + return err + } else if n != 1 && n != 2 { // 1 if inserted, 2 if updated + return fmt.Errorf("expected 1 row affected, got %v", n) + } + return nil +} + +func (tx *MainDatabaseTx) UpdateBucketPolicy(ctx context.Context, bucket string, bp api.BucketPolicy) error { + return ssql.UpdateBucketPolicy(ctx, tx, bucket, bp) +} + +func (tx *MainDatabaseTx) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error { + if clear { + if _, err := tx.Exec(ctx, "DELETE FROM host_allowlist_entries"); err != nil { + return fmt.Errorf("failed to clear host allowlist entries: %w", err) + } + } + + if len(add) > 0 { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO host_allowlist_entries (entry) VALUES (?) ON DUPLICATE KEY UPDATE id = last_insert_id(id)") + if err != nil { + return fmt.Errorf("failed to prepare insert statement: %w", err) + } + defer insertStmt.Close() + joinStmt, err := tx.Prepare(ctx, ` + INSERT IGNORE INTO host_allowlist_entry_hosts (db_allowlist_entry_id, db_host_id) + SELECT ?, id FROM ( + SELECT id + FROM hosts + WHERE public_key = ? 
// UpdateHostBlocklistEntries adds and removes blocklist entries, optionally
// clearing the list first (in which case 'remove' is ignored). Added entries
// are joined to hosts whose net_address matches the entry exactly, matches it
// as a hostname, or ends with it as a domain suffix.
func (tx *MainDatabaseTx) UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error {
	if clear {
		if _, err := tx.Exec(ctx, "DELETE FROM host_blocklist_entries"); err != nil {
			return fmt.Errorf("failed to clear host blocklist entries: %w", err)
		}
	}

	if len(add) > 0 {
		// last_insert_id(id) lets an existing entry report its id through
		// LastInsertId
		insertStmt, err := tx.Prepare(ctx, "INSERT INTO host_blocklist_entries (entry) VALUES (?) ON DUPLICATE KEY UPDATE id = last_insert_id(id)")
		if err != nil {
			return fmt.Errorf("failed to prepare insert statement: %w", err)
		}
		defer insertStmt.Close()
		// SUBSTRING_INDEX strips the port from net_address; matched either
		// exactly or by '%.entry' domain-suffix pattern
		joinStmt, err := tx.Prepare(ctx, `
		INSERT IGNORE INTO host_blocklist_entry_hosts (db_blocklist_entry_id, db_host_id)
		SELECT ?, id FROM (
			SELECT id
			FROM hosts
			WHERE net_address=? OR
				SUBSTRING_INDEX(net_address,':',1) = ? OR
				SUBSTRING_INDEX(net_address,':',1) LIKE ?
		) AS _
	`)
		if err != nil {
			return fmt.Errorf("failed to prepare join statement: %w", err)
		}
		defer joinStmt.Close()

		for _, entry := range add {
			if res, err := insertStmt.Exec(ctx, entry); err != nil {
				return fmt.Errorf("failed to insert host blocklist entry: %w", err)
			} else if entryID, err := res.LastInsertId(); err != nil {
				return fmt.Errorf("failed to fetch host blocklist entry id: %w", err)
			} else if _, err := joinStmt.Exec(ctx, entryID, entry, entry, fmt.Sprintf("%%.%s", entry)); err != nil {
				return fmt.Errorf("failed to join host blocklist entry: %w", err)
			}
		}
	}

	if !clear && len(remove) > 0 {
		deleteStmt, err := tx.Prepare(ctx, "DELETE FROM host_blocklist_entries WHERE entry = ?")
		if err != nil {
			return fmt.Errorf("failed to prepare delete statement: %w", err)
		}
		defer deleteStmt.Close()

		for _, entry := range remove {
			if _, err := deleteStmt.Exec(ctx, entry); err != nil {
				return fmt.Errorf("failed to delete host blocklist entry: %w", err)
			}
		}
	}
	return nil
}

// UpdateHostCheck upserts the autopilot-specific usability/score/gouging
// check result for a host, keyed by (autopilot, host).
func (tx *MainDatabaseTx) UpdateHostCheck(ctx context.Context, autopilot string, hk types.PublicKey, hc api.HostCheck) error {
	_, err := tx.Exec(ctx, `
		INSERT INTO host_checks (created_at, db_autopilot_id, db_host_id, usability_blocked, usability_offline, usability_low_score,
			usability_redundant_ip, usability_gouging, usability_not_accepting_contracts, usability_not_announced, usability_not_completing_scan,
			score_age, score_collateral, score_interactions, score_storage_remaining, score_uptime, score_version, score_prices,
			gouging_contract_err, gouging_download_err, gouging_gouging_err, gouging_prune_err, gouging_upload_err)
		VALUES (?,
			(SELECT id FROM autopilots WHERE identifier = ?),
			(SELECT id FROM hosts WHERE public_key = ?),
			?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
		ON DUPLICATE KEY UPDATE
			created_at = VALUES(created_at), db_autopilot_id = VALUES(db_autopilot_id), db_host_id = VALUES(db_host_id),
			usability_blocked = VALUES(usability_blocked), usability_offline = VALUES(usability_offline), usability_low_score = VALUES(usability_low_score),
			usability_redundant_ip = VALUES(usability_redundant_ip), usability_gouging = VALUES(usability_gouging), usability_not_accepting_contracts = VALUES(usability_not_accepting_contracts),
			usability_not_announced = VALUES(usability_not_announced), usability_not_completing_scan = VALUES(usability_not_completing_scan),
			score_age = VALUES(score_age), score_collateral = VALUES(score_collateral), score_interactions = VALUES(score_interactions),
			score_storage_remaining = VALUES(score_storage_remaining), score_uptime = VALUES(score_uptime), score_version = VALUES(score_version),
			score_prices = VALUES(score_prices), gouging_contract_err = VALUES(gouging_contract_err), gouging_download_err = VALUES(gouging_download_err),
			gouging_gouging_err = VALUES(gouging_gouging_err), gouging_prune_err = VALUES(gouging_prune_err), gouging_upload_err = VALUES(gouging_upload_err)
	`, time.Now(), autopilot, ssql.PublicKey(hk), hc.Usability.Blocked, hc.Usability.Offline, hc.Usability.LowScore,
		hc.Usability.RedundantIP, hc.Usability.Gouging, hc.Usability.NotAcceptingContracts, hc.Usability.NotAnnounced, hc.Usability.NotCompletingScan,
		hc.Score.Age, hc.Score.Collateral, hc.Score.Interactions, hc.Score.StorageRemaining, hc.Score.Uptime, hc.Score.Version, hc.Score.Prices,
		hc.Gouging.ContractErr, hc.Gouging.DownloadErr, hc.Gouging.GougingErr, hc.Gouging.PruneErr, hc.Gouging.UploadErr,
	)
	if err != nil {
		return fmt.Errorf("failed to insert host check: %w", err)
	}
	return nil
}

// UpdateSetting upserts the value of the setting with the given key.
func (tx *MainDatabaseTx) UpdateSetting(ctx context.Context, key, value string) error {
	// 'key' is a reserved word in MySQL, hence the backticks
	_, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE value = VALUES(value)",
		time.Now(), key, value)
	if err != nil {
		return fmt.Errorf("failed to update setting '%s': %w", key, err)
	}
	return nil
}
ON DUPLICATE KEY UPDATE value = VALUES(value)", + time.Now(), key, value) + if err != nil { + return fmt.Errorf("failed to update setting '%s': %w", key, err) + } + return nil +} + +func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contractSet string, fcids []types.FileContractID) error { + // find all used contracts + usedContracts, err := ssql.FetchUsedContracts(ctx, tx, fcids) + if err != nil { + return fmt.Errorf("failed to fetch used contracts: %w", err) + } + + // extract the slab key + key, err := s.Key.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal slab key: %w", err) + } + + // update slab + res, err := tx.Exec(ctx, ` + UPDATE slabs + SET db_contract_set_id = (SELECT id FROM contract_sets WHERE name = ?), + health_valid_until = ?, + health = ? + WHERE `+"`key`"+` = ? + `, contractSet, time.Now().Unix(), 1, ssql.SecretKey(key)) + if err != nil { + return err + } else if n, err := res.RowsAffected(); err != nil { + return err + } else if n == 0 { + return fmt.Errorf("%w: slab with key '%s' not found: %w", api.ErrSlabNotFound, string(key), err) + } + + // fetch slab id and total shards + var slabID, totalShards int64 + err = tx.QueryRow(ctx, "SELECT id, total_shards FROM slabs WHERE `key` = ?", ssql.SecretKey(key)). + Scan(&slabID, &totalShards) + if err != nil { + return err + } + + // find shards of slab + var roots []types.Hash256 + rows, err := tx.Query(ctx, "SELECT root FROM sectors WHERE db_slab_id = ? ORDER BY sectors.slab_index ASC", slabID) + if err != nil { + return fmt.Errorf("failed to fetch sectors: %w", err) + } + defer rows.Close() + + for rows.Next() { + var root ssql.Hash256 + if err := rows.Scan(&root); err != nil { + return fmt.Errorf("failed to scan sector id: %w", err) + } + roots = append(roots, types.Hash256(root)) + } + nSectors := len(roots) + + // make sure the number of shards doesn't change. + // NOTE: check both the slice as well as the TotalShards field to be + // safe. 
+ if len(s.Shards) != int(totalShards) { + return fmt.Errorf("%w: expected %v shards (TotalShards) but got %v", sql.ErrInvalidNumberOfShards, totalShards, len(s.Shards)) + } else if len(s.Shards) != nSectors { + return fmt.Errorf("%w: expected %v shards (Shards) but got %v", sql.ErrInvalidNumberOfShards, nSectors, len(s.Shards)) + } + + // make sure the roots stay the same. + for i, root := range roots { + if root != types.Hash256(s.Shards[i].Root) { + return fmt.Errorf("%w: shard %v has changed root from %v to %v", sql.ErrShardRootChanged, i, s.Shards[i].Root, root[:]) + } + } + + // update sectors + var upsertSectors []upsertSector + for i := range s.Shards { + upsertSectors = append(upsertSectors, upsertSector{ + slabID, + i + 1, + s.Shards[i].LatestHost, + s.Shards[i].Root, + }) + } + sectorIDs, err := tx.upsertSectors(ctx, upsertSectors) + if err != nil { + return fmt.Errorf("failed to insert sectors: %w", err) + } + + // build contract <-> sector links + var upsertContractSectors []upsertContractSector + for i, shard := range s.Shards { + sectorID := sectorIDs[i] + + // ensure the associations are updated + for _, fcids := range shard.Contracts { + for _, fcid := range fcids { + if _, ok := usedContracts[fcid]; ok { + upsertContractSectors = append(upsertContractSectors, upsertContractSector{ + sectorID, + usedContracts[fcid].ID, + }) + } else { + tx.log.Named("UpdateSlab").Warn("missing contract for shard", + "contract", fcid, + "root", shard.Root, + "latest_host", shard.LatestHost, + ) + } + } + } + } + if err := tx.upsertContractSectors(ctx, upsertContractSectors); err != nil { + return err + } + + return nil +} + +func (tx *MainDatabaseTx) UpdateSlabHealth(ctx context.Context, limit int64, minDuration, maxDuration time.Duration) (int64, error) { + now := time.Now() + if err := ssql.PrepareSlabHealth(ctx, tx, limit, now); err != nil { + return 0, fmt.Errorf("failed to compute slab health: %w", err) + } + + res, err := tx.Exec(ctx, "UPDATE slabs sla INNER 
// Webhooks delegates to ssql.Webhooks.
func (tx *MainDatabaseTx) Webhooks(ctx context.Context) ([]webhooks.Webhook, error) {
	return ssql.Webhooks(ctx, tx)
}

// insertSlabs upserts the given slab slices and attaches them either to an
// object (objID) or to a multipart part (partID) — exactly one must be set.
// It also upserts the slabs' sectors and their contract links.
func (tx *MainDatabaseTx) insertSlabs(ctx context.Context, objID, partID *int64, contractSet string, slices object.SlabSlices) error {
	if (objID == nil) == (partID == nil) {
		return errors.New("exactly one of objID and partID must be set")
	} else if len(slices) == 0 {
		return nil // nothing to do
	}

	usedContracts, err := ssql.FetchUsedContracts(ctx, tx.Tx, slices.Contracts())
	if err != nil {
		return fmt.Errorf("failed to fetch used contracts: %w", err)
	}

	// get contract set id
	var contractSetID int64
	if err := tx.QueryRow(ctx, "SELECT id FROM contract_sets WHERE contract_sets.name = ?", contractSet).
		Scan(&contractSetID); err != nil {
		return fmt.Errorf("failed to fetch contract set id: %w", err)
	}

	// insert slabs; last_insert_id(id) makes an existing slab report its own
	// id through LastInsertId
	insertSlabStmt, err := tx.Prepare(ctx, `INSERT INTO slabs (created_at, db_contract_set_id, `+"`key`"+`, min_shards, total_shards)
			VALUES (?, ?, ?, ?, ?)
			ON DUPLICATE KEY UPDATE id = last_insert_id(id)`)
	if err != nil {
		return fmt.Errorf("failed to prepare statement to insert slab: %w", err)
	}
	defer insertSlabStmt.Close()

	querySlabIDStmt, err := tx.Prepare(ctx, "SELECT id FROM slabs WHERE `key` = ?")
	if err != nil {
		return fmt.Errorf("failed to prepare statement to query slab id: %w", err)
	}
	defer querySlabIDStmt.Close()

	slabIDs := make([]int64, len(slices))
	for i := range slices {
		slabKey, err := slices[i].Key.MarshalBinary()
		if err != nil {
			return fmt.Errorf("failed to marshal slab key: %w", err)
		}
		res, err := insertSlabStmt.Exec(ctx,
			time.Now(),
			contractSetID,
			ssql.SecretKey(slabKey),
			slices[i].MinShards,
			uint8(len(slices[i].Shards)),
		)
		if err != nil {
			return fmt.Errorf("failed to insert slab: %w", err)
		}
		slabIDs[i], err = res.LastInsertId()
		if err != nil {
			return fmt.Errorf("failed to fetch slab id: %w", err)
		}
	}

	// insert slices; exactly one of objID/partID is non-nil so the other
	// column is stored as NULL
	insertSliceStmt, err := tx.Prepare(ctx, `INSERT INTO slices (created_at, db_object_id, object_index, db_multipart_part_id, db_slab_id, offset, length)
				VALUES (?, ?, ?, ?, ?, ?, ?)`)
	if err != nil {
		return fmt.Errorf("failed to prepare statement to insert slice: %w", err)
	}
	defer insertSliceStmt.Close()

	for i := range slices {
		res, err := insertSliceStmt.Exec(ctx,
			time.Now(),
			objID,
			uint(i+1),
			partID,
			slabIDs[i],
			slices[i].Offset,
			slices[i].Length,
		)
		if err != nil {
			return fmt.Errorf("failed to insert slice: %w", err)
		} else if n, err := res.RowsAffected(); err != nil {
			return fmt.Errorf("failed to get rows affected: %w", err)
		} else if n == 0 {
			return fmt.Errorf("failed to insert slice: no rows affected")
		}
	}

	// insert sectors
	var upsertSectors []upsertSector
	for i, ss := range slices {
		for j := range ss.Shards {
			upsertSectors = append(upsertSectors, upsertSector{
				slabIDs[i],
				j + 1,
				ss.Shards[j].LatestHost,
				ss.Shards[j].Root,
			})
		}
	}
	sectorIDs, err := tx.upsertSectors(ctx, upsertSectors)
	if err != nil {
		return fmt.Errorf("failed to insert sectors: %w", err)
	}

	// insert contract <-> sector links; sectorIdx tracks the flat position in
	// sectorIDs matching the upsertSectors order above
	sectorIdx := 0
	var upsertContractSectors []upsertContractSector
	for _, ss := range slices {
		for _, shard := range ss.Shards {
			for _, fcids := range shard.Contracts {
				for _, fcid := range fcids {
					if _, ok := usedContracts[fcid]; ok {
						upsertContractSectors = append(upsertContractSectors, upsertContractSector{
							sectorIDs[sectorIdx],
							usedContracts[fcid].ID,
						})
					} else {
						tx.log.Named("InsertObject").Warn("missing contract for shard",
							"contract", fcid,
							"root", shard.Root,
							"latest_host", shard.LatestHost,
						)
					}
				}
			}
			sectorIdx++
		}
	}
	if err := tx.upsertContractSectors(ctx, upsertContractSectors); err != nil {
		return err
	}
	return nil
}

// upsertContractSector is a pending contract <-> sector link.
type upsertContractSector struct {
	sectorID   int64
	contractID int64
}

// upsertContractSectors inserts the given contract <-> sector links, ignoring
// links that already exist.
func (tx *MainDatabaseTx) upsertContractSectors(ctx context.Context, contractSectors []upsertContractSector) error {
	if len(contractSectors) == 0 {
		return nil
	}

	// insert contract <-> sector links
	insertContractSectorStmt, err := tx.Prepare(ctx, `INSERT IGNORE INTO contract_sectors (db_sector_id, db_contract_id)
					VALUES (?, ?)`)
	if err != nil {
		return fmt.Errorf("failed to prepare statement to insert contract sector link: %w", err)
	}
	defer insertContractSectorStmt.Close()

	for _, cs := range contractSectors {
		_, err := insertContractSectorStmt.Exec(ctx,
			cs.sectorID,
			cs.contractID,
		)
		if err != nil {
			return fmt.Errorf("failed to insert contract sector link: %w", err)
		}
	}
	return nil
}

// upsertSector is a pending sector row keyed by (slabID, slabIndex).
type upsertSector struct {
	slabID     int64
	slabIndex  int
	latestHost types.PublicKey
	root       types.Hash256
}

func (tx *MainDatabaseTx) upsertSectors(ctx context.Context, sectors []upsertSector) ([]int64, error) {
	if len(sectors) == 0 {
		return nil, nil
	}

	// insert sectors - make sure to update last_insert_id
in case of a + // duplicate key to be able to retrieve the id + insertSectorStmt, err := tx.Prepare(ctx, `INSERT INTO sectors (created_at, db_slab_id, slab_index, latest_host, root) + VALUES (?, ?, ?, ?, ?) ON DUPLICATE KEY UPDATE latest_host = VALUES(latest_host), id = last_insert_id(id)`) + if err != nil { + return nil, fmt.Errorf("failed to prepare statement to insert sector: %w", err) + } + defer insertSectorStmt.Close() + + querySectorSlabIDStmt, err := tx.Prepare(ctx, "SELECT db_slab_id FROM sectors WHERE id = ?") + if err != nil { + return nil, fmt.Errorf("failed to prepare statement to query slab id: %w", err) + } + defer querySectorSlabIDStmt.Close() + + var sectorIDs []int64 + for _, s := range sectors { + var sectorID, slabID int64 + res, err := insertSectorStmt.Exec(ctx, + time.Now(), + s.slabID, + s.slabIndex, + ssql.PublicKey(s.latestHost), + s.root[:], + ) + if err != nil { + return nil, fmt.Errorf("failed to insert sector: %w", err) + } else if sectorID, err = res.LastInsertId(); err != nil { + return nil, fmt.Errorf("failed to fetch sector id: %w", err) + } else if err := querySectorSlabIDStmt.QueryRow(ctx, sectorID).Scan(&slabID); err != nil { + return nil, fmt.Errorf("failed to fetch slab id: %w", err) + } else if slabID != s.slabID { + return nil, fmt.Errorf("failed to insert sector for slab %v: already exists for slab %v", s.slabID, slabID) + } + sectorIDs = append(sectorIDs, sectorID) + } + return sectorIDs, nil +} diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go new file mode 100644 index 000000000..dd51f228e --- /dev/null +++ b/stores/sql/mysql/metrics.go @@ -0,0 +1,125 @@ +package mysql + +import ( + "context" + "encoding/hex" + "time" + + dsql "database/sql" + + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/sql" + ssql "go.sia.tech/renterd/stores/sql" + "lukechampine.com/frand" + + "go.uber.org/zap" +) + +type ( + MetricsDatabase struct { + log *zap.SugaredLogger + db *sql.DB + } + + MetricsDatabaseTx 
struct { + sql.Tx + log *zap.SugaredLogger + } +) + +var _ ssql.MetricsDatabaseTx = (*MetricsDatabaseTx)(nil) + +// NewMetricsDatabase creates a new MySQL backend. +func NewMetricsDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration) (*MetricsDatabase, error) { + store, err := sql.NewDB(db, log.Desugar(), deadlockMsgs, lqd, ltd) + return &MetricsDatabase{ + db: store, + log: log, + }, err +} + +func (b *MetricsDatabase) ApplyMigration(ctx context.Context, fn func(tx sql.Tx) (bool, error)) error { + return applyMigration(ctx, b.db, fn) +} + +func (b *MetricsDatabase) Close() error { + return b.db.Close() +} + +func (b *MetricsDatabase) DB() *sql.DB { + return b.db +} + +func (b *MetricsDatabase) CreateMigrationTable(ctx context.Context) error { + return createMigrationTable(ctx, b.db) +} + +func (b *MetricsDatabase) Migrate(ctx context.Context) error { + return sql.PerformMigrations(ctx, b, migrationsFs, "metrics", sql.MetricsMigrations(ctx, migrationsFs, b.log)) +} + +func (b *MetricsDatabase) Transaction(ctx context.Context, fn func(tx ssql.MetricsDatabaseTx) error) error { + return b.db.Transaction(ctx, func(tx sql.Tx) error { + return fn(b.wrapTxn(tx)) + }) +} + +func (b *MetricsDatabase) Version(ctx context.Context) (string, string, error) { + return version(ctx, b.db) +} + +func (b *MetricsDatabase) wrapTxn(tx sql.Tx) *MetricsDatabaseTx { + return &MetricsDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))} +} + +func (tx *MetricsDatabaseTx) ContractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) ([]api.ContractMetric, error) { + return ssql.ContractMetrics(ctx, tx, start, n, interval, ssql.ContractMetricsQueryOpts{ContractMetricsQueryOpts: opts, IndexHint: "USE INDEX (idx_contracts_fcid_timestamp)"}) +} + +func (tx *MetricsDatabaseTx) ContractPruneMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) 
([]api.ContractPruneMetric, error) { + return ssql.ContractPruneMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) ContractSetChurnMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) ([]api.ContractSetChurnMetric, error) { + return ssql.ContractSetChurnMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) ContractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) (metrics []api.ContractSetMetric, _ error) { + return ssql.ContractSetMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) { + return ssql.PerformanceMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error { + return ssql.PruneMetrics(ctx, tx, metric, cutoff) +} + +func (tx *MetricsDatabaseTx) RecordContractMetric(ctx context.Context, metrics ...api.ContractMetric) error { + return ssql.RecordContractMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error { + return ssql.RecordContractPruneMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error { + return ssql.RecordContractSetChurnMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordContractSetMetric(ctx context.Context, metrics ...api.ContractSetMetric) error { + return ssql.RecordContractSetMetric(ctx, tx, metrics...) 
+} + +func (tx *MetricsDatabaseTx) RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error { + return ssql.RecordPerformanceMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { + return ssql.RecordWalletMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) WalletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) ([]api.WalletMetric, error) { + return ssql.WalletMetrics(ctx, tx, start, n, interval, opts) +} diff --git a/stores/migrations/mysql/main/migration_00001_object_metadata.sql b/stores/sql/mysql/migrations/main/migration_00001_object_metadata.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00001_object_metadata.sql rename to stores/sql/mysql/migrations/main/migration_00001_object_metadata.sql diff --git a/stores/migrations/mysql/main/migration_00002_prune_slabs_trigger.sql b/stores/sql/mysql/migrations/main/migration_00002_prune_slabs_trigger.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00002_prune_slabs_trigger.sql rename to stores/sql/mysql/migrations/main/migration_00002_prune_slabs_trigger.sql diff --git a/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql b/stores/sql/mysql/migrations/main/migration_00003_idx_objects_size.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00003_idx_objects_size.sql rename to stores/sql/mysql/migrations/main/migration_00003_idx_objects_size.sql diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/sql/mysql/migrations/main/migration_00004_prune_slabs_cascade.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql rename to stores/sql/mysql/migrations/main/migration_00004_prune_slabs_cascade.sql diff --git 
a/stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql b/stores/sql/mysql/migrations/main/migration_00005_zero_size_object_health.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00005_zero_size_object_health.sql rename to stores/sql/mysql/migrations/main/migration_00005_zero_size_object_health.sql diff --git a/stores/migrations/mysql/main/migration_00006_idx_objects_created_at.sql b/stores/sql/mysql/migrations/main/migration_00006_idx_objects_created_at.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00006_idx_objects_created_at.sql rename to stores/sql/mysql/migrations/main/migration_00006_idx_objects_created_at.sql diff --git a/stores/migrations/mysql/main/migration_00007_host_checks.sql b/stores/sql/mysql/migrations/main/migration_00007_host_checks.sql similarity index 100% rename from stores/migrations/mysql/main/migration_00007_host_checks.sql rename to stores/sql/mysql/migrations/main/migration_00007_host_checks.sql diff --git a/stores/sql/mysql/migrations/main/migration_00008_directories_1.sql b/stores/sql/mysql/migrations/main/migration_00008_directories_1.sql new file mode 100644 index 000000000..8c2edfd70 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00008_directories_1.sql @@ -0,0 +1,16 @@ +-- dbDirectory +CREATE TABLE IF NOT EXISTS `directories` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `db_parent_id` bigint unsigned, + `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `idx_directories_parent_id` (`db_parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +-- dbObject: add column and constraint +ALTER TABLE `objects` +ADD COLUMN `db_directory_id` bigint 
unsigned, +ADD CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`); \ No newline at end of file diff --git a/stores/sql/mysql/migrations/main/migration_00008_directories_2.sql b/stores/sql/mysql/migrations/main/migration_00008_directories_2.sql new file mode 100644 index 000000000..6033f7c02 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00008_directories_2.sql @@ -0,0 +1,3 @@ +-- Add NOT NULL to column +ALTER TABLE `objects` +MODIFY COLUMN `db_directory_id` bigint unsigned NOT NULL; \ No newline at end of file diff --git a/stores/sql/mysql/migrations/main/migration_00009_json_settings.sql b/stores/sql/mysql/migrations/main/migration_00009_json_settings.sql new file mode 100644 index 000000000..cde425b38 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00009_json_settings.sql @@ -0,0 +1,2 @@ +ALTER TABLE `hosts` MODIFY COLUMN `settings` JSON; +ALTER TABLE `buckets` MODIFY COLUMN `policy` JSON; \ No newline at end of file diff --git a/stores/sql/mysql/migrations/main/migration_00010_webhook_headers.sql b/stores/sql/mysql/migrations/main/migration_00010_webhook_headers.sql new file mode 100644 index 000000000..3fc6af932 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00010_webhook_headers.sql @@ -0,0 +1 @@ +ALTER TABLE `webhooks` ADD COLUMN `headers` JSON DEFAULT ('{}'); \ No newline at end of file diff --git a/stores/sql/mysql/migrations/main/migration_00011_host_subnets.sql b/stores/sql/mysql/migrations/main/migration_00011_host_subnets.sql new file mode 100644 index 000000000..ef70b7843 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00011_host_subnets.sql @@ -0,0 +1,2 @@ +ALTER TABLE `hosts` ADD COLUMN `subnets` VARCHAR(255) NOT NULL DEFAULT ''; +UPDATE `hosts` SET last_scan = 0; \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/sql/mysql/migrations/main/schema.sql similarity index 95% rename from 
stores/migrations/mysql/main/schema.sql rename to stores/sql/mysql/migrations/main/schema.sql index 446b2a805..145fa9452 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/sql/mysql/migrations/main/schema.sql @@ -51,7 +51,7 @@ CREATE TABLE `autopilots` ( CREATE TABLE `buckets` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, - `policy` longtext, + `policy` JSON, `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `name` (`name`), @@ -81,7 +81,7 @@ CREATE TABLE `hosts` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, `public_key` varbinary(32) NOT NULL, - `settings` longtext, + `settings` JSON, `price_table` longtext, `price_table_expiry` datetime(3) DEFAULT NULL, `total_scans` bigint unsigned DEFAULT NULL, @@ -98,6 +98,7 @@ CREATE TABLE `hosts` ( `lost_sectors` bigint unsigned DEFAULT NULL, `last_announcement` datetime(3) DEFAULT NULL, `net_address` varchar(191) DEFAULT NULL, + `subnets` varchar(255) NOT NULL DEFAULT '', PRIMARY KEY (`id`), UNIQUE KEY `public_key` (`public_key`), KEY `idx_hosts_public_key` (`public_key`), @@ -313,11 +314,24 @@ CREATE TABLE `multipart_parts` ( CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; +-- dbDirectory +CREATE TABLE `directories` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `db_parent_id` bigint unsigned, + `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `idx_directories_parent_id` (`db_parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_0900_ai_ci; + -- dbObject CREATE TABLE `objects` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, `db_bucket_id` bigint unsigned NOT NULL, + `db_directory_id` bigint unsigned NOT NULL, `object_id` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, `key` longblob, `health` double NOT NULL DEFAULT '1', @@ -332,7 +346,9 @@ CREATE TABLE `objects` ( KEY `idx_objects_etag` (`etag`), KEY `idx_objects_size` (`size`), KEY `idx_objects_created_at` (`created_at`), - CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) + KEY `idx_objects_db_directory_id` (`db_directory_id`), + CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`), + CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbSetting @@ -404,6 +420,7 @@ CREATE TABLE `webhooks` ( `module` varchar(255) NOT NULL, `event` varchar(255) NOT NULL, `url` varchar(255) NOT NULL, + `headers` JSON DEFAULT ('{}'), PRIMARY KEY (`id`), UNIQUE KEY `idx_module_event_url` (`module`,`event`,`url`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql b/stores/sql/mysql/migrations/metrics/migration_00001_idx_contracts_fcid_timestamp.sql similarity index 100% rename from stores/migrations/mysql/metrics/migration_00001_idx_contracts_fcid_timestamp.sql rename to stores/sql/mysql/migrations/metrics/migration_00001_idx_contracts_fcid_timestamp.sql diff --git a/stores/migrations/mysql/metrics/schema.sql b/stores/sql/mysql/migrations/metrics/schema.sql similarity index 100% rename from stores/migrations/mysql/metrics/schema.sql rename to stores/sql/mysql/migrations/metrics/schema.sql diff --git a/stores/sql/rows.go b/stores/sql/rows.go new file mode 100644 index 
000000000..44260b2ad --- /dev/null +++ b/stores/sql/rows.go @@ -0,0 +1,82 @@ +package sql + +import ( + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" +) + +type scanner interface { + Scan(dest ...any) error +} + +type ContractRow struct { + FCID FileContractID + RenewedFrom FileContractID + + ContractPrice Currency + State ContractState + TotalCost Currency + ProofHeight uint64 + RevisionHeight uint64 + RevisionNumber uint64 + Size uint64 + StartHeight uint64 + WindowStart uint64 + WindowEnd uint64 + + // spending fields + UploadSpending Currency + DownloadSpending Currency + FundAccountSpending Currency + DeleteSpending Currency + ListSpending Currency + + ContractSet string + NetAddress string + PublicKey PublicKey + SiamuxPort string +} + +func (r *ContractRow) Scan(s scanner) error { + return s.Scan(&r.FCID, &r.RenewedFrom, &r.ContractPrice, &r.State, &r.TotalCost, &r.ProofHeight, + &r.RevisionHeight, &r.RevisionNumber, &r.Size, &r.StartHeight, &r.WindowStart, &r.WindowEnd, + &r.UploadSpending, &r.DownloadSpending, &r.FundAccountSpending, &r.DeleteSpending, &r.ListSpending, + &r.ContractSet, &r.NetAddress, &r.PublicKey, &r.SiamuxPort) +} + +func (r *ContractRow) ContractMetadata() api.ContractMetadata { + var sets []string + if r.ContractSet != "" { + sets = append(sets, r.ContractSet) + } + return api.ContractMetadata{ + ContractPrice: types.Currency(r.ContractPrice), + ID: types.FileContractID(r.FCID), + HostIP: r.NetAddress, + HostKey: types.PublicKey(r.PublicKey), + SiamuxAddr: rhpv2.HostSettings{ + NetAddress: r.NetAddress, + SiaMuxPort: r.SiamuxPort, + }.SiamuxAddr(), + + RenewedFrom: types.FileContractID(r.RenewedFrom), + TotalCost: types.Currency(r.TotalCost), + Spending: api.ContractSpending{ + Uploads: types.Currency(r.UploadSpending), + Downloads: types.Currency(r.DownloadSpending), + FundAccount: types.Currency(r.FundAccountSpending), + Deletions: types.Currency(r.DeleteSpending), + SectorRoots: 
types.Currency(r.ListSpending), + }, + ProofHeight: r.ProofHeight, + RevisionHeight: r.RevisionHeight, + RevisionNumber: r.RevisionNumber, + ContractSets: sets, + Size: r.Size, + StartHeight: r.StartHeight, + State: r.State.String(), + WindowStart: r.WindowStart, + WindowEnd: r.WindowEnd, + } +} diff --git a/stores/sql/sqlite/common.go b/stores/sql/sqlite/common.go new file mode 100644 index 000000000..c45c0eab7 --- /dev/null +++ b/stores/sql/sqlite/common.go @@ -0,0 +1,78 @@ +package sqlite + +import ( + "context" + dsql "database/sql" + "embed" + "errors" + "fmt" + "time" + + "go.sia.tech/renterd/internal/sql" + "go.uber.org/zap" +) + +var deadlockMsgs = []string{ + "database is locked", + "database table is locked", +} + +//go:embed all:migrations/* +var migrationsFs embed.FS + +func Open(path string) (*dsql.DB, error) { + return dsql.Open("sqlite3", fmt.Sprintf("file:%s?_busy_timeout=30000&_foreign_keys=1&_journal_mode=WAL&_secure_delete=false&_cache_size=65536", path)) +} + +func OpenEphemeral(name string) (*dsql.DB, error) { + return dsql.Open("sqlite3", fmt.Sprintf("file:%s?mode=memory&cache=shared&_foreign_keys=1", name)) +} + +func applyMigration(ctx context.Context, db *sql.DB, fn func(tx sql.Tx) (bool, error)) (err error) { + if _, err := db.Exec(ctx, "PRAGMA foreign_keys=OFF"); err != nil { + return fmt.Errorf("failed to disable foreign keys: %w", err) + } + defer func() { + _, err2 := db.Exec(ctx, "PRAGMA foreign_keys=ON") + err = errors.Join(err, err2) + }() + return db.Transaction(ctx, func(tx sql.Tx) error { + // execute migration + if migrated, err := fn(tx); err != nil { + return err + } else if !migrated { + return nil + } + // perform foreign key integrity check + if err := tx.QueryRow(ctx, "PRAGMA foreign_key_check").Scan(); !errors.Is(err, dsql.ErrNoRows) { + return fmt.Errorf("foreign key constraints are not satisfied") + } + return nil + }) +} + +func closeDB(db *sql.DB, log *zap.SugaredLogger) error { + // NOTE: as recommended by 
https://www.sqlite.org/lang_analyze.html + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + _, err := db.Exec(ctx, "PRAGMA analysis_limit=400; PRAGMA optimize;") + if err != nil { + log.With(zap.Error(err)).Error("failed to optimize database before closing") + } + return db.Close() +} + +func createMigrationTable(ctx context.Context, db *sql.DB) error { + if _, err := db.Exec(ctx, "CREATE TABLE IF NOT EXISTS `migrations` (`id` text,PRIMARY KEY (`id`))"); err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) + } + return nil +} + +func version(ctx context.Context, db *sql.DB) (string, string, error) { + var version string + if err := db.QueryRow(ctx, "select sqlite_version()").Scan(&version); err != nil { + return "", "", err + } + return "SQLite", version, nil +} diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go new file mode 100644 index 000000000..cf6c415e6 --- /dev/null +++ b/stores/sql/sqlite/main.go @@ -0,0 +1,1189 @@ +package sqlite + +import ( + "context" + dsql "database/sql" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + "unicode/utf8" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/sql" + "go.sia.tech/renterd/object" + ssql "go.sia.tech/renterd/stores/sql" + "go.sia.tech/renterd/webhooks" + "go.sia.tech/siad/modules" + "lukechampine.com/frand" + + "go.uber.org/zap" +) + +type ( + MainDatabase struct { + db *sql.DB + log *zap.SugaredLogger + } + + MainDatabaseTx struct { + sql.Tx + log *zap.SugaredLogger + } +) + +// NewMainDatabase creates a new SQLite backend. 
+func NewMainDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration) (*MainDatabase, error) { + store, err := sql.NewDB(db, log.Desugar(), deadlockMsgs, lqd, ltd) + return &MainDatabase{ + db: store, + log: log, + }, err +} + +func (b *MainDatabase) ApplyMigration(ctx context.Context, fn func(tx sql.Tx) (bool, error)) error { + return applyMigration(ctx, b.db, fn) +} + +func (b *MainDatabase) Close() error { + return closeDB(b.db, b.log) +} + +func (b *MainDatabase) CreateMigrationTable(ctx context.Context) error { + return createMigrationTable(ctx, b.db) +} + +func (b *MainDatabase) DB() *sql.DB { + return b.db +} + +func (b *MainDatabase) LoadSlabBuffers(ctx context.Context) ([]ssql.LoadedSlabBuffer, []string, error) { + return ssql.LoadSlabBuffers(ctx, b.db) +} + +func (b *MainDatabase) MakeDirsForPath(ctx context.Context, tx sql.Tx, path string) (int64, error) { + mtx := b.wrapTxn(tx) + return mtx.MakeDirsForPath(ctx, path) +} + +func (b *MainDatabase) Migrate(ctx context.Context) error { + return sql.PerformMigrations(ctx, b, migrationsFs, "main", sql.MainMigrations(ctx, b, migrationsFs, b.log)) +} + +func (b *MainDatabase) Transaction(ctx context.Context, fn func(tx ssql.DatabaseTx) error) error { + return b.db.Transaction(ctx, func(tx sql.Tx) error { + return fn(b.wrapTxn(tx)) + }) +} + +func (b *MainDatabase) Version(ctx context.Context) (string, string, error) { + return version(ctx, b.db) +} + +func (b *MainDatabase) wrapTxn(tx sql.Tx) *MainDatabaseTx { + return &MainDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))} +} + +func (tx *MainDatabaseTx) Accounts(ctx context.Context) ([]api.Account, error) { + return ssql.Accounts(ctx, tx) +} + +func (tx *MainDatabaseTx) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error { + // fetch contract set + var csID int64 + err := tx.QueryRow(ctx, "SELECT id FROM contract_sets WHERE name = ?", contractSet). 
+ Scan(&csID) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrContractSetNotFound + } else if err != nil { + return fmt.Errorf("failed to fetch contract set id: %w", err) + } + + // find multipart upload + var muID int64 + err = tx.QueryRow(ctx, "SELECT id FROM multipart_uploads WHERE upload_id = ?", uploadID). + Scan(&muID) + if err != nil { + return fmt.Errorf("failed to fetch multipart upload: %w", err) + } + + // delete a potentially existing part + _, err = tx.Exec(ctx, "DELETE FROM multipart_parts WHERE db_multipart_upload_id = ? AND part_number = ?", + muID, partNumber) + if err != nil { + return fmt.Errorf("failed to delete existing part: %w", err) + } + + // insert new part + var size uint64 + for _, slice := range slices { + size += uint64(slice.Length) + } + var partID int64 + res, err := tx.Exec(ctx, "INSERT INTO multipart_parts (created_at, etag, part_number, size, db_multipart_upload_id) VALUES (?, ?, ?, ?, ?)", + time.Now(), eTag, partNumber, size, muID) + if err != nil { + return fmt.Errorf("failed to insert part: %w", err) + } else if partID, err = res.LastInsertId(); err != nil { + return fmt.Errorf("failed to fetch part id: %w", err) + } + + // create slices + return tx.insertSlabs(ctx, nil, &partID, contractSet, slices) +} + +func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { + return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID) +} + +func (tx *MainDatabaseTx) AddWebhook(ctx context.Context, wh webhooks.Webhook) error { + headers := "{}" + if len(wh.Headers) > 0 { + h, err := json.Marshal(wh.Headers) + if err != nil { + return fmt.Errorf("failed to marshal headers: %w", err) + } + headers = string(h) + } + _, err := tx.Exec(ctx, "INSERT INTO webhooks (created_at, module, event, url, headers) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT DO UPDATE SET headers = EXCLUDED.headers", + time.Now(), wh.Module, wh.Event, wh.URL, headers) + if err != nil { + return fmt.Errorf("failed to insert webhook: %w", err) + } + return nil +} + +func (tx *MainDatabaseTx) AncestorContracts(ctx context.Context, fcid types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) { + return ssql.AncestorContracts(ctx, tx, fcid, startHeight) +} + +func (tx *MainDatabaseTx) ArchiveContract(ctx context.Context, fcid types.FileContractID, reason string) error { + return ssql.ArchiveContract(ctx, tx, fcid, reason) +} + +func (tx *MainDatabaseTx) Autopilot(ctx context.Context, id string) (api.Autopilot, error) { + return ssql.Autopilot(ctx, tx, id) +} + +func (tx *MainDatabaseTx) Autopilots(ctx context.Context) ([]api.Autopilot, error) { + return ssql.Autopilots(ctx, tx) +} + +func (tx *MainDatabaseTx) Bucket(ctx context.Context, bucket string) (api.Bucket, error) { + return ssql.Bucket(ctx, tx, bucket) +} + +func (tx *MainDatabaseTx) CompleteMultipartUpload(ctx context.Context, bucket, key, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (string, error) { + mpu, neededParts, size, eTag, err := ssql.MultipartUploadForCompletion(ctx, tx, bucket, key, uploadID, parts) + if err != nil { + return "", fmt.Errorf("failed to fetch multipart upload: %w", err) + } + + // create the directory. + dirID, err := tx.MakeDirsForPath(ctx, key) + if err != nil { + return "", fmt.Errorf("failed to create directory for key %s: %w", key, err) + } + + // create the object + objID, err := ssql.InsertObject(ctx, tx, key, dirID, mpu.BucketID, size, mpu.EC, mpu.MimeType, eTag) + if err != nil { + return "", fmt.Errorf("failed to insert object: %w", err) + } + + // update slices + updateSlicesStmt, err := tx.Prepare(ctx, ` + WITH cte AS ( + SELECT s.rowid + FROM slices s + INNER JOIN multipart_parts mpp ON s.db_multipart_part_id = mpp.id + WHERE mpp.id = ? 
+ ) + UPDATE slices + SET db_object_id = ?, + db_multipart_part_id = NULL, + object_index = object_index + ? + WHERE rowid IN (SELECT rowid FROM cte); + `) + if err != nil { + return "", fmt.Errorf("failed to prepare statement: %w", err) + } + defer updateSlicesStmt.Close() + + var updatedSlices int64 + for _, part := range neededParts { + res, err := updateSlicesStmt.Exec(ctx, part.ID, objID, updatedSlices) + if err != nil { + return "", fmt.Errorf("failed to update slices: %w", err) + } + n, err := res.RowsAffected() + if err != nil { + return "", fmt.Errorf("failed to get rows affected: %w", err) + } + updatedSlices += n + } + + // create/update metadata + if err := ssql.InsertMetadata(ctx, tx, &objID, nil, opts.Metadata); err != nil { + return "", fmt.Errorf("failed to insert object metadata: %w", err) + } + _, err = tx.Exec(ctx, "UPDATE object_user_metadata SET db_multipart_upload_id = NULL, db_object_id = ? WHERE db_multipart_upload_id = ?", + objID, mpu.ID) + if err != nil { + return "", fmt.Errorf("failed to update object metadata: %w", err) + } + + // delete the multipart upload + if _, err := tx.Exec(ctx, "DELETE FROM multipart_uploads WHERE id = ?", mpu.ID); err != nil { + return "", fmt.Errorf("failed to delete multipart upload: %w", err) + } + + return eTag, nil +} + +func (tx *MainDatabaseTx) ContractRoots(ctx context.Context, fcid types.FileContractID) ([]types.Hash256, error) { + return ssql.ContractRoots(ctx, tx, fcid) +} + +func (tx *MainDatabaseTx) Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) { + return ssql.Contracts(ctx, tx, opts) +} + +func (tx *MainDatabaseTx) ContractSets(ctx context.Context) ([]string, error) { + return ssql.ContractSets(ctx, tx) +} + +func (tx *MainDatabaseTx) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) { + return ssql.ContractSize(ctx, tx, id) +} + +func (tx *MainDatabaseTx) ContractSizes(ctx context.Context) 
(map[types.FileContractID]api.ContractSize, error) { + return ssql.ContractSizes(ctx, tx) +} + +func (tx *MainDatabaseTx) CopyObject(ctx context.Context, srcBucket, dstBucket, srcKey, dstKey, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) { + return ssql.CopyObject(ctx, tx, srcBucket, dstBucket, srcKey, dstKey, mimeType, metadata) +} + +func (tx *MainDatabaseTx) CreateBucket(ctx context.Context, bucket string, bp api.BucketPolicy) error { + policy, err := json.Marshal(bp) + if err != nil { + return err + } + res, err := tx.Exec(ctx, "INSERT INTO buckets (created_at, name, policy) VALUES (?, ?, ?) ON CONFLICT(name) DO NOTHING", + time.Now(), bucket, policy) + if err != nil { + return fmt.Errorf("failed to create bucket: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return api.ErrBucketExists + } + return nil +} + +func (tx *MainDatabaseTx) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) { + return ssql.DeleteHostSector(ctx, tx, hk, root) +} + +func (tx *MainDatabaseTx) DeleteSettings(ctx context.Context, key string) error { + return ssql.DeleteSettings(ctx, tx, key) +} + +func (tx *MainDatabaseTx) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { + return ssql.DeleteWebhook(ctx, tx, wh) +} + +func (tx *MainDatabaseTx) InsertBufferedSlab(ctx context.Context, fileName string, contractSetID int64, ec object.EncryptionKey, minShards, totalShards uint8) (int64, error) { + return ssql.InsertBufferedSlab(ctx, tx, fileName, contractSetID, ec, minShards, totalShards) +} + +func (tx *MainDatabaseTx) InsertMultipartUpload(ctx context.Context, bucket, key string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (string, error) { + return ssql.InsertMultipartUpload(ctx, tx, bucket, key, ec, mimeType, metadata) +} + +func (tx *MainDatabaseTx) DeleteBucket(ctx 
context.Context, bucket string) error { + return ssql.DeleteBucket(ctx, tx, bucket) +} + +func (tx *MainDatabaseTx) DeleteObject(ctx context.Context, bucket string, key string) (bool, error) { + resp, err := tx.Exec(ctx, "DELETE FROM objects WHERE object_id = ? AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", key, bucket) + if err != nil { + return false, err + } else if n, err := resp.RowsAffected(); err != nil { + return false, err + } else { + return n != 0, nil + } +} + +func (tx *MainDatabaseTx) DeleteObjects(ctx context.Context, bucket string, key string, limit int64) (bool, error) { + resp, err := tx.Exec(ctx, ` + DELETE FROM objects + WHERE id IN ( + SELECT id FROM objects + WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?) + LIMIT ? + )`, key+"%", utf8.RuneCountInString(key), key, bucket, limit) + if err != nil { + return false, err + } else if n, err := resp.RowsAffected(); err != nil { + return false, err + } else { + return n != 0, nil + } +} + +func (tx *MainDatabaseTx) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) { + return ssql.HostAllowlist(ctx, tx) +} + +func (tx *MainDatabaseTx) HostBlocklist(ctx context.Context) ([]string, error) { + return ssql.HostBlocklist(ctx, tx) +} + +func (tx *MainDatabaseTx) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { + return ssql.HostsForScanning(ctx, tx, maxLastScan, offset, limit) +} + +func (tx *MainDatabaseTx) InitConsensusInfo(ctx context.Context) (types.ChainIndex, modules.ConsensusChangeID, error) { + return ssql.InitConsensusInfo(ctx, tx) +} + +func (tx *MainDatabaseTx) InsertObject(ctx context.Context, bucket, key, contractSet string, dirID int64, o object.Object, mimeType, eTag string, md api.ObjectUserMetadata) error { + // get bucket id + var bucketID int64 + err := tx.QueryRow(ctx, "SELECT id FROM buckets WHERE buckets.name = ?", 
bucket).Scan(&bucketID) + if errors.Is(err, dsql.ErrNoRows) { + return api.ErrBucketNotFound + } else if err != nil { + return fmt.Errorf("failed to fetch bucket id: %w", err) + } + + // insert object + objKey, err := o.Key.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal object key: %w", err) + } + objID, err := ssql.InsertObject(ctx, tx, key, dirID, bucketID, o.TotalSize(), objKey, mimeType, eTag) + if err != nil { + return fmt.Errorf("failed to insert object: %w", err) + } + + // insert slabs + if err := tx.insertSlabs(ctx, &objID, nil, contractSet, o.Slabs); err != nil { + return fmt.Errorf("failed to insert slabs: %w", err) + } + + // insert metadata + if err := ssql.InsertMetadata(ctx, tx, &objID, nil, md); err != nil { + return fmt.Errorf("failed to insert object metadata: %w", err) + } + return nil +} + +func (tx *MainDatabaseTx) InvalidateSlabHealthByFCID(ctx context.Context, fcids []types.FileContractID, limit int64) (int64, error) { + if len(fcids) == 0 { + return 0, nil + } + // prepare args + var args []any + for _, fcid := range fcids { + args = append(args, ssql.FileContractID(fcid)) + } + args = append(args, time.Now().Unix()) + args = append(args, limit) + res, err := tx.Exec(ctx, fmt.Sprintf(` + UPDATE slabs SET health_valid_until = 0 WHERE id in ( + SELECT slabs.id + FROM slabs + INNER JOIN sectors se ON se.db_slab_id = slabs.id + INNER JOIN contract_sectors cs ON cs.db_sector_id = se.id + INNER JOIN contracts c ON c.id = cs.db_contract_id + WHERE c.fcid IN (%s) AND slabs.health_valid_until >= ? + LIMIT ? + ) + `, strings.Repeat("?, ", len(fcids)-1)+"?"), args...) 
+ if err != nil { + fmt.Println(strings.Repeat("?, ", len(fcids)-1) + "?") + return 0, err + } + return res.RowsAffected() +} + +func (tx *MainDatabaseTx) ListBuckets(ctx context.Context) ([]api.Bucket, error) { + return ssql.ListBuckets(ctx, tx) +} + +func (tx *MainDatabaseTx) ListObjects(ctx context.Context, bucket, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) { + return ssql.ListObjects(ctx, tx, bucket, prefix, sortBy, sortDir, marker, limit) +} + +func (tx *MainDatabaseTx) MakeDirsForPath(ctx context.Context, path string) (int64, error) { + insertDirStmt, err := tx.Prepare(ctx, "INSERT INTO directories (name, db_parent_id) VALUES (?, ?) ON CONFLICT(name) DO NOTHING") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + defer insertDirStmt.Close() + + queryDirStmt, err := tx.Prepare(ctx, "SELECT id FROM directories WHERE name = ?") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + defer queryDirStmt.Close() + + // Create root dir. + dirID := int64(sql.DirectoriesRootID) + if _, err := tx.Exec(ctx, "INSERT INTO directories (id, name, db_parent_id) VALUES (?, '/', NULL) ON CONFLICT(id) DO NOTHING", dirID); err != nil { + return 0, fmt.Errorf("failed to create root directory: %w", err) + } + + // Create remaining directories. 
+ path = strings.TrimSuffix(path, "/") + if path == "/" { + return dirID, nil + } + for i := 0; i < utf8.RuneCountInString(path); i++ { + if path[i] != '/' { + continue + } + dir := path[:i+1] + if dir == "/" { + continue + } + if _, err := insertDirStmt.Exec(ctx, dir, dirID); err != nil { + return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) + } + var childID int64 + if err := queryDirStmt.QueryRow(ctx, dir).Scan(&childID); err != nil { + return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } else if childID == 0 { + return 0, fmt.Errorf("dir we just created doesn't exist - shouldn't happen") + } + dirID = childID + } + return dirID, nil +} + +func (tx *MainDatabaseTx) MultipartUpload(ctx context.Context, uploadID string) (api.MultipartUpload, error) { + return ssql.MultipartUpload(ctx, tx, uploadID) +} + +func (tx *MainDatabaseTx) MultipartUploadParts(ctx context.Context, bucket, key, uploadID string, marker int, limit int64) (api.MultipartListPartsResponse, error) { + return ssql.MultipartUploadParts(ctx, tx, bucket, key, uploadID, marker, limit) +} + +func (tx *MainDatabaseTx) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker string, limit int) (api.MultipartListUploadsResponse, error) { + return ssql.MultipartUploads(ctx, tx, bucket, prefix, keyMarker, uploadIDMarker, limit) +} + +func (tx *MainDatabaseTx) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + return ssql.ObjectsStats(ctx, tx, opts) +} + +func (tx *MainDatabaseTx) PruneEmptydirs(ctx context.Context) error { + stmt, err := tx.Prepare(ctx, ` + DELETE + FROM directories + WHERE directories.id != 1 + AND NOT EXISTS (SELECT 1 FROM objects WHERE objects.db_directory_id = directories.id) + AND NOT EXISTS (SELECT 1 FROM (SELECT 1 FROM directories AS d WHERE d.db_parent_id = directories.id) i) + `) + if err != nil { + return err + } + defer stmt.Close() + for { + res, err := stmt.Exec(ctx) + if 
err != nil { + return err + } else if n, err := res.RowsAffected(); err != nil { + return err + } else if n == 0 { + return nil + } + } +} + +func (tx *MainDatabaseTx) PruneSlabs(ctx context.Context, limit int64) (int64, error) { + res, err := tx.Exec(ctx, ` + DELETE FROM slabs + WHERE id IN ( + SELECT id + FROM ( + SELECT slabs.id + FROM slabs + WHERE NOT EXISTS ( + SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id + ) + AND slabs.db_buffered_slab_id IS NULL + LIMIT ? + ) AS limited + )`, limit) + if err != nil { + return 0, err + } + return res.RowsAffected() +} + +func (tx *MainDatabaseTx) RecordHostScans(ctx context.Context, scans []api.HostScan) error { + return ssql.RecordHostScans(ctx, tx, scans) +} + +func (tx *MainDatabaseTx) RecordPriceTables(ctx context.Context, priceTableUpdates []api.HostPriceTableUpdate) error { + return ssql.RecordPriceTables(ctx, tx, priceTableUpdates) +} + +func (tx *MainDatabaseTx) RemoveContractSet(ctx context.Context, contractSet string) error { + return ssql.RemoveContractSet(ctx, tx, contractSet) +} + +func (tx *MainDatabaseTx) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDownTime time.Duration) (int64, error) { + return ssql.RemoveOfflineHosts(ctx, tx, minRecentFailures, maxDownTime) +} + +func (tx *MainDatabaseTx) RenameObject(ctx context.Context, bucket, keyOld, keyNew string, dirID int64, force bool) error { + if force { + // delete potentially existing object at destination + if _, err := tx.DeleteObject(ctx, bucket, keyNew); err != nil { + return fmt.Errorf("RenameObject: failed to delete object: %w", err) + } + } else { + var exists bool + if err := tx.QueryRow(ctx, "SELECT EXISTS (SELECT 1 FROM objects WHERE object_id = ? AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?))", keyNew, bucket).Scan(&exists); err != nil { + return err + } else if exists { + return api.ErrObjectExists + } + } + resp, err := tx.Exec(ctx, `UPDATE objects SET object_id = ?, db_directory_id = ? 
WHERE object_id = ? AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)`, keyNew, dirID, keyOld, bucket) + if err != nil { + return err + } else if n, err := resp.RowsAffected(); err != nil { + return err + } else if n == 0 { + return fmt.Errorf("%w: key %v", api.ErrObjectNotFound, keyOld) + } + return nil +} + +func (tx *MainDatabaseTx) RenameObjects(ctx context.Context, bucket, prefixOld, prefixNew string, dirID int64, force bool) error { + if force { + _, err := tx.Exec(ctx, ` + DELETE + FROM objects + WHERE object_id IN ( + SELECT CONCAT(?, SUBSTR(object_id, ?)) + FROM objects + WHERE object_id LIKE ? + AND SUBSTR(object_id, 1, ?) = ? + AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?) + )`, + prefixNew, + utf8.RuneCountInString(prefixOld)+1, + prefixOld+"%", + utf8.RuneCountInString(prefixOld), prefixOld, + bucket) + if err != nil { + return err + } + } + resp, err := tx.Exec(ctx, ` + UPDATE objects + SET object_id = ? || SUBSTR(object_id, ?), + db_directory_id = ? + WHERE object_id LIKE ? + AND SUBSTR(object_id, 1, ?) = ? 
+ AND db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)`, + prefixNew, utf8.RuneCountInString(prefixOld)+1, + dirID, + prefixOld+"%", + utf8.RuneCountInString(prefixOld), prefixOld, + bucket) + if err != nil && strings.Contains(err.Error(), "UNIQUE constraint failed") { + return api.ErrObjectExists + } else if err != nil { + return err + } else if n, err := resp.RowsAffected(); err != nil { + return err + } else if n == 0 { + return fmt.Errorf("%w: prefix %v", api.ErrObjectNotFound, prefixOld) + } + return nil +} + +func (tx *MainDatabaseTx) RenewedContract(ctx context.Context, renwedFrom types.FileContractID) (api.ContractMetadata, error) { + return ssql.RenewedContract(ctx, tx, renwedFrom) +} + +func (tx *MainDatabaseTx) ResetConsensusSubscription(ctx context.Context) (types.ChainIndex, error) { + return ssql.ResetConsensusSubscription(ctx, tx) +} + +func (tx *MainDatabaseTx) ResetLostSectors(ctx context.Context, hk types.PublicKey) error { + return ssql.ResetLostSectors(ctx, tx, hk) +} + +func (tx *MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Account) error { + // clean_shutdown = 1 after save + stmt, err := tx.Prepare(ctx, ` + INSERT INTO ephemeral_accounts (created_at, account_id, clean_shutdown, host, balance, drift, requires_sync) + VAlUES (?, ?, 1, ?, ?, ?, ?) 
+ ON CONFLICT(account_id) DO UPDATE SET + account_id = EXCLUDED.account_id, + clean_shutdown = 1, + host = EXCLUDED.host, + balance = EXCLUDED.balance, + drift = EXCLUDED.drift, + requires_sync = EXCLUDED.requires_sync + `) + if err != nil { + return err + } + defer stmt.Close() + + for _, acc := range accounts { + res, err := stmt.Exec(ctx, time.Now(), (ssql.PublicKey)(acc.ID), (ssql.PublicKey)(acc.HostKey), (*ssql.BigInt)(acc.Balance), (*ssql.BigInt)(acc.Drift), acc.RequiresSync) + if err != nil { + return fmt.Errorf("failed to insert account %v: %w", acc.ID, err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n != 1 { + return fmt.Errorf("expected 1 row affected, got %v", n) + } + } + return nil +} + +func (tx *MainDatabaseTx) SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { + return ssql.SearchHosts(ctx, tx, autopilotID, filterMode, usabilityMode, addressContains, keyIn, offset, limit) +} + +func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, error) { + return ssql.Setting(ctx, tx, key) +} + +func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { + return ssql.Settings(ctx, tx) +} + +func (tx *MainDatabaseTx) SetUncleanShutdown(ctx context.Context) error { + return ssql.SetUncleanShutdown(ctx, tx) +} + +func (tx *MainDatabaseTx) SlabBuffers(ctx context.Context) (map[string]string, error) { + return ssql.SlabBuffers(ctx, tx) +} + +func (tx *MainDatabaseTx) UpdateAutopilot(ctx context.Context, ap api.Autopilot) error { + res, err := tx.Exec(ctx, ` + INSERT INTO autopilots (created_at, identifier, config, current_period) + VALUES (?, ?, ?, ?) 
+ ON CONFLICT(identifier) DO UPDATE SET + config = EXCLUDED.config, + current_period = EXCLUDED.current_period + `, time.Now(), ap.ID, (*ssql.AutopilotConfig)(&ap.Config), ap.CurrentPeriod) + if err != nil { + return err + } else if n, err := res.RowsAffected(); err != nil { + return err + } else if n != 1 { + return fmt.Errorf("expected 1 row affected, got %v", n) + } + return nil +} + +func (tx *MainDatabaseTx) UpdateBucketPolicy(ctx context.Context, bucket string, policy api.BucketPolicy) error { + return ssql.UpdateBucketPolicy(ctx, tx, bucket, policy) +} + +func (tx *MainDatabaseTx) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error { + if clear { + if _, err := tx.Exec(ctx, "DELETE FROM host_allowlist_entries"); err != nil { + return fmt.Errorf("failed to clear host allowlist entries: %w", err) + } + } + + if len(add) > 0 { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO host_allowlist_entries (entry) VALUES (?) ON CONFLICT(entry) DO UPDATE SET id = id RETURNING id") + if err != nil { + return fmt.Errorf("failed to prepare insert statement: %w", err) + } + defer insertStmt.Close() + joinStmt, err := tx.Prepare(ctx, ` + INSERT OR IGNORE INTO host_allowlist_entry_hosts (db_allowlist_entry_id, db_host_id) + SELECT ?, id FROM ( + SELECT id + FROM hosts + WHERE public_key = ? 
+ )`) + if err != nil { + return fmt.Errorf("failed to prepare join statement: %w", err) + } + defer joinStmt.Close() + + for _, pk := range add { + if res, err := insertStmt.Exec(ctx, ssql.PublicKey(pk)); err != nil { + return fmt.Errorf("failed to insert host allowlist entry: %w", err) + } else if entryID, err := res.LastInsertId(); err != nil { + return fmt.Errorf("failed to fetch host allowlist entry id: %w", err) + } else if _, err := joinStmt.Exec(ctx, entryID, ssql.PublicKey(pk)); err != nil { + return fmt.Errorf("failed to join host allowlist entry: %w", err) + } + } + } + + if !clear && len(remove) > 0 { + deleteStmt, err := tx.Prepare(ctx, "DELETE FROM host_allowlist_entries WHERE entry = ?") + if err != nil { + return fmt.Errorf("failed to prepare delete statement: %w", err) + } + defer deleteStmt.Close() + + for _, pk := range remove { + if _, err := deleteStmt.Exec(ctx, ssql.PublicKey(pk)); err != nil { + return fmt.Errorf("failed to delete host allowlist entry: %w", err) + } + } + } + return nil +} + +func (tx *MainDatabaseTx) UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error { + if clear { + if _, err := tx.Exec(ctx, "DELETE FROM host_blocklist_entries"); err != nil { + return fmt.Errorf("failed to clear host blocklist entries: %w", err) + } + } + + if len(add) > 0 { + insertStmt, err := tx.Prepare(ctx, "INSERT INTO host_blocklist_entries (entry) VALUES (?) ON CONFLICT(entry) DO UPDATE SET id = id RETURNING id") + if err != nil { + return fmt.Errorf("failed to prepare insert statement: %w", err) + } + defer insertStmt.Close() + joinStmt, err := tx.Prepare(ctx, ` + INSERT OR IGNORE INTO host_blocklist_entry_hosts (db_blocklist_entry_id, db_host_id) + SELECT ?, id FROM ( + SELECT id + FROM hosts + WHERE net_address == ? OR + rtrim(rtrim(net_address, replace(net_address, ':', '')),':') == ? OR + rtrim(rtrim(net_address, replace(net_address, ':', '')),':') LIKE ? 
+ )`) + if err != nil { + return fmt.Errorf("failed to prepare join statement: %w", err) + } + defer joinStmt.Close() + + for _, entry := range add { + if res, err := insertStmt.Exec(ctx, entry); err != nil { + return fmt.Errorf("failed to insert host blocklist entry: %w", err) + } else if entryID, err := res.LastInsertId(); err != nil { + return fmt.Errorf("failed to fetch host blocklist entry id: %w", err) + } else if _, err := joinStmt.Exec(ctx, entryID, entry, entry, fmt.Sprintf("%%.%s", entry)); err != nil { + return fmt.Errorf("failed to join host blocklist entry: %w", err) + } + } + } + + if !clear && len(remove) > 0 { + deleteStmt, err := tx.Prepare(ctx, "DELETE FROM host_blocklist_entries WHERE entry = ?") + if err != nil { + return fmt.Errorf("failed to prepare delete statement: %w", err) + } + defer deleteStmt.Close() + + for _, entry := range remove { + if _, err := deleteStmt.Exec(ctx, entry); err != nil { + return fmt.Errorf("failed to delete host blocklist entry: %w", err) + } + } + } + return nil +} + +func (tx *MainDatabaseTx) UpdateHostCheck(ctx context.Context, autopilot string, hk types.PublicKey, hc api.HostCheck) error { + _, err := tx.Exec(ctx, ` + INSERT INTO host_checks (created_at, db_autopilot_id, db_host_id, usability_blocked, usability_offline, usability_low_score, + usability_redundant_ip, usability_gouging, usability_not_accepting_contracts, usability_not_announced, usability_not_completing_scan, + score_age, score_collateral, score_interactions, score_storage_remaining, score_uptime, score_version, score_prices, + gouging_contract_err, gouging_download_err, gouging_gouging_err, gouging_prune_err, gouging_upload_err) + VALUES (?, + (SELECT id FROM autopilots WHERE identifier = ?), + (SELECT id FROM hosts WHERE public_key = ?), + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT (db_autopilot_id, db_host_id) DO UPDATE SET + created_at = EXCLUDED.created_at, db_autopilot_id = EXCLUDED.db_autopilot_id, db_host_id = EXCLUDED.db_host_id, + usability_blocked = EXCLUDED.usability_blocked, usability_offline = EXCLUDED.usability_offline, usability_low_score = EXCLUDED.usability_low_score, + usability_redundant_ip = EXCLUDED.usability_redundant_ip, usability_gouging = EXCLUDED.usability_gouging, usability_not_accepting_contracts = EXCLUDED.usability_not_accepting_contracts, + usability_not_announced = EXCLUDED.usability_not_announced, usability_not_completing_scan = EXCLUDED.usability_not_completing_scan, + score_age = EXCLUDED.score_age, score_collateral = EXCLUDED.score_collateral, score_interactions = EXCLUDED.score_interactions, + score_storage_remaining = EXCLUDED.score_storage_remaining, score_uptime = EXCLUDED.score_uptime, score_version = EXCLUDED.score_version, + score_prices = EXCLUDED.score_prices, gouging_contract_err = EXCLUDED.gouging_contract_err, gouging_download_err = EXCLUDED.gouging_download_err, + gouging_gouging_err = EXCLUDED.gouging_gouging_err, gouging_prune_err = EXCLUDED.gouging_prune_err, gouging_upload_err = EXCLUDED.gouging_upload_err + `, time.Now(), autopilot, ssql.PublicKey(hk), hc.Usability.Blocked, hc.Usability.Offline, hc.Usability.LowScore, + hc.Usability.RedundantIP, hc.Usability.Gouging, hc.Usability.NotAcceptingContracts, hc.Usability.NotAnnounced, hc.Usability.NotCompletingScan, + hc.Score.Age, hc.Score.Collateral, hc.Score.Interactions, hc.Score.StorageRemaining, hc.Score.Uptime, hc.Score.Version, hc.Score.Prices, + hc.Gouging.ContractErr, hc.Gouging.DownloadErr, hc.Gouging.GougingErr, hc.Gouging.PruneErr, hc.Gouging.UploadErr, + ) + if err != nil { + return fmt.Errorf("failed to insert host check: %w", err) + } + return nil +} + +func (tx *MainDatabaseTx) UpdateSetting(ctx context.Context, key, value string) error { + _, err := tx.Exec(ctx, "INSERT INTO settings (created_at, `key`, value) 
VALUES (?, ?, ?) ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value", + time.Now(), key, value) + if err != nil { + return fmt.Errorf("failed to update setting '%s': %w", key, err) + } + return nil +} + +func (tx *MainDatabaseTx) UpdateSlab(ctx context.Context, s object.Slab, contractSet string, fcids []types.FileContractID) error { + // find all used contracts + usedContracts, err := ssql.FetchUsedContracts(ctx, tx, fcids) + if err != nil { + return fmt.Errorf("failed to fetch used contracts: %w", err) + } + + // extract the slab key + key, err := s.Key.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal slab key: %w", err) + } + + // update slab + var slabID, totalShards int64 + err = tx.QueryRow(ctx, ` + UPDATE slabs + SET db_contract_set_id = (SELECT id FROM contract_sets WHERE name = ?), + health_valid_until = ?, + health = ? + WHERE key = ? + RETURNING id, total_shards + `, contractSet, time.Now().Unix(), 1, ssql.SecretKey(key)). + Scan(&slabID, &totalShards) + if errors.Is(err, dsql.ErrNoRows) { + return fmt.Errorf("%w: slab with key '%s' not found: %w", api.ErrSlabNotFound, string(key), err) + } else if err != nil { + return err + } + + // find shards of slab + var roots []types.Hash256 + rows, err := tx.Query(ctx, "SELECT root FROM sectors WHERE db_slab_id = ? ORDER BY sectors.slab_index ASC", slabID) + if err != nil { + return fmt.Errorf("failed to fetch sectors: %w", err) + } + defer rows.Close() + + for rows.Next() { + var root ssql.Hash256 + if err := rows.Scan(&root); err != nil { + return fmt.Errorf("failed to scan sector id: %w", err) + } + roots = append(roots, types.Hash256(root)) + } + nSectors := len(roots) + + // make sure the number of shards doesn't change. + // NOTE: check both the slice as well as the TotalShards field to be + // safe. 
+ if len(s.Shards) != int(totalShards) { + return fmt.Errorf("%w: expected %v shards (TotalShards) but got %v", sql.ErrInvalidNumberOfShards, totalShards, len(s.Shards)) + } else if len(s.Shards) != nSectors { + return fmt.Errorf("%w: expected %v shards (Shards) but got %v", sql.ErrInvalidNumberOfShards, nSectors, len(s.Shards)) + } + + // make sure the roots stay the same. + for i, root := range roots { + if root != types.Hash256(s.Shards[i].Root) { + return fmt.Errorf("%w: shard %v has changed root from %v to %v", sql.ErrShardRootChanged, i, s.Shards[i].Root, root[:]) + } + } + + // update sectors + var upsertSectors []upsertSector + for i := range s.Shards { + upsertSectors = append(upsertSectors, upsertSector{ + slabID, + i + 1, + s.Shards[i].LatestHost, + s.Shards[i].Root, + }) + } + sectorIDs, err := tx.upsertSectors(ctx, upsertSectors) + if err != nil { + return fmt.Errorf("failed to insert sectors: %w", err) + } + + // build contract <-> sector links + var upsertContractSectors []upsertContractSector + for i, shard := range s.Shards { + sectorID := sectorIDs[i] + + // ensure the associations are updated + for _, fcids := range shard.Contracts { + for _, fcid := range fcids { + if _, ok := usedContracts[fcid]; ok { + upsertContractSectors = append(upsertContractSectors, upsertContractSector{ + sectorID, + usedContracts[fcid].ID, + }) + } else { + tx.log.Named("UpdateSlab").Warn("missing contract for shard", + "contract", fcid, + "root", shard.Root, + "latest_host", shard.LatestHost, + ) + } + } + } + } + if err := tx.upsertContractSectors(ctx, upsertContractSectors); err != nil { + return err + } + + return nil +} + +func (tx *MainDatabaseTx) UpdateSlabHealth(ctx context.Context, limit int64, minDuration, maxDuration time.Duration) (int64, error) { + now := time.Now() + if err := ssql.PrepareSlabHealth(ctx, tx, limit, now); err != nil { + return 0, fmt.Errorf("failed to compute slab health: %w", err) + } + + res, err := tx.Exec(ctx, "UPDATE slabs SET health 
= inner.health, health_valid_until = (ABS(RANDOM()) % (? - ?) + ?) FROM slabs_health AS inner WHERE slabs.id=inner.id", + maxDuration.Seconds(), minDuration.Seconds(), now.Add(minDuration).Unix()) + if err != nil { + return 0, fmt.Errorf("failed to update slab health: %w", err) + } + + _, err = tx.Exec(ctx, ` + UPDATE objects SET health = ( + SELECT MIN(sla.health) + FROM slabs sla + INNER JOIN slices ON slices.db_slab_id = sla.id + WHERE slices.db_object_id = objects.id + ) WHERE EXISTS ( + SELECT 1 + FROM slabs_health h + INNER JOIN slices ON slices.db_slab_id = h.id + WHERE slices.db_object_id = objects.id + )`) + if err != nil { + return 0, fmt.Errorf("failed to update object health: %w", err) + } + return res.RowsAffected() +} + +func (tx *MainDatabaseTx) Webhooks(ctx context.Context) ([]webhooks.Webhook, error) { + return ssql.Webhooks(ctx, tx) +} + +func (tx *MainDatabaseTx) insertSlabs(ctx context.Context, objID, partID *int64, contractSet string, slices object.SlabSlices) error { + if (objID == nil) == (partID == nil) { + return errors.New("exactly one of objID and partID must be set") + } else if len(slices) == 0 { + return nil // nothing to do + } + + usedContracts, err := ssql.FetchUsedContracts(ctx, tx.Tx, slices.Contracts()) + if err != nil { + return fmt.Errorf("failed to fetch used contracts: %w", err) + } + + // get contract set id + var contractSetID int64 + if err := tx.QueryRow(ctx, "SELECT id FROM contract_sets WHERE contract_sets.name = ?", contractSet). + Scan(&contractSetID); err != nil { + return fmt.Errorf("failed to fetch contract set id: %w", err) + } + + // insert slabs + insertSlabStmt, err := tx.Prepare(ctx, `INSERT INTO slabs (created_at, db_contract_set_id, key, min_shards, total_shards) + VALUES (?, ?, ?, ?, ?) 
+ ON CONFLICT(key) DO NOTHING RETURNING id`) + if err != nil { + return fmt.Errorf("failed to prepare statement to insert slab: %w", err) + } + defer insertSlabStmt.Close() + + querySlabIDStmt, err := tx.Prepare(ctx, "SELECT id FROM slabs WHERE key = ?") + if err != nil { + return fmt.Errorf("failed to prepare statement to query slab id: %w", err) + } + defer querySlabIDStmt.Close() + + slabIDs := make([]int64, len(slices)) + for i := range slices { + slabKey, err := slices[i].Key.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal slab key: %w", err) + } + err = insertSlabStmt.QueryRow(ctx, + time.Now(), + contractSetID, + ssql.SecretKey(slabKey), + slices[i].MinShards, + uint8(len(slices[i].Shards)), + ).Scan(&slabIDs[i]) + if errors.Is(err, dsql.ErrNoRows) { + if err := querySlabIDStmt.QueryRow(ctx, ssql.SecretKey(slabKey)).Scan(&slabIDs[i]); err != nil { + return fmt.Errorf("failed to fetch slab id: %w", err) + } + } else if err != nil { + return fmt.Errorf("failed to insert slab: %w", err) + } + } + + // insert slices + insertSliceStmt, err := tx.Prepare(ctx, `INSERT INTO slices (created_at, db_object_id, object_index, db_multipart_part_id, db_slab_id, offset, length) + VALUES (?, ?, ?, ?, ?, ?, ?)`) + if err != nil { + return fmt.Errorf("failed to prepare statement to insert slice: %w", err) + } + defer insertSliceStmt.Close() + + for i := range slices { + res, err := insertSliceStmt.Exec(ctx, + time.Now(), + objID, + uint(i+1), + partID, + slabIDs[i], + slices[i].Offset, + slices[i].Length, + ) + if err != nil { + return fmt.Errorf("failed to insert slice: %w", err) + } else if n, err := res.RowsAffected(); err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } else if n == 0 { + return fmt.Errorf("failed to insert slice: no rows affected") + } + } + + // insert sectors + var upsertSectors []upsertSector + for i, ss := range slices { + for j := range ss.Shards { + upsertSectors = append(upsertSectors, upsertSector{ 
+ slabIDs[i], + j + 1, + ss.Shards[j].LatestHost, + ss.Shards[j].Root, + }) + } + } + sectorIDs, err := tx.upsertSectors(ctx, upsertSectors) + if err != nil { + return fmt.Errorf("failed to insert sectors: %w", err) + } + + // insert contract <-> sector links + sectorIdx := 0 + var upsertContractSectors []upsertContractSector + for _, ss := range slices { + for _, shard := range ss.Shards { + for _, fcids := range shard.Contracts { + for _, fcid := range fcids { + if _, ok := usedContracts[fcid]; ok { + upsertContractSectors = append(upsertContractSectors, upsertContractSector{ + sectorIDs[sectorIdx], + usedContracts[fcid].ID, + }) + } else { + tx.log.Named("InsertObject").Warn("missing contract for shard", + "contract", fcid, + "root", shard.Root, + "latest_host", shard.LatestHost, + ) + } + } + } + sectorIdx++ + } + } + if err := tx.upsertContractSectors(ctx, upsertContractSectors); err != nil { + return err + } + return nil +} + +type upsertContractSector struct { + sectorID int64 + contractID int64 +} + +func (tx *MainDatabaseTx) upsertContractSectors(ctx context.Context, contractSectors []upsertContractSector) error { + if len(contractSectors) == 0 { + return nil + } + + // insert contract <-> sector links + insertContractSectorStmt, err := tx.Prepare(ctx, `INSERT INTO contract_sectors (db_sector_id, db_contract_id) + VALUES (?, ?) 
ON CONFLICT(db_sector_id, db_contract_id) DO NOTHING`) + if err != nil { + return fmt.Errorf("failed to prepare statement to insert contract sector link: %w", err) + } + defer insertContractSectorStmt.Close() + + for _, cs := range contractSectors { + _, err := insertContractSectorStmt.Exec(ctx, + cs.sectorID, + cs.contractID, + ) + if err != nil { + return fmt.Errorf("failed to insert contract sector link: %w", err) + } + } + return nil +} + +type upsertSector struct { + slabID int64 + slabIndex int + latestHost types.PublicKey + root types.Hash256 +} + +func (tx *MainDatabaseTx) upsertSectors(ctx context.Context, sectors []upsertSector) ([]int64, error) { + if len(sectors) == 0 { + return nil, nil + } + + // insert sectors - make sure to update last_insert_id in case of a + // duplicate key to be able to retrieve the id + insertSectorStmt, err := tx.Prepare(ctx, `INSERT INTO sectors (created_at, db_slab_id, slab_index, latest_host, root) + VALUES (?, ?, ?, ?, ?) ON CONFLICT(root) DO UPDATE SET latest_host = EXCLUDED.latest_host RETURNING id, db_slab_id`) + if err != nil { + return nil, fmt.Errorf("failed to prepare statement to insert sector: %w", err) + } + defer insertSectorStmt.Close() + + var sectorIDs []int64 + for _, s := range sectors { + var sectorID, slabID int64 + err := insertSectorStmt.QueryRow(ctx, + time.Now(), + s.slabID, + s.slabIndex, + ssql.PublicKey(s.latestHost), + s.root[:], + ).Scan(§orID, &slabID) + if err != nil { + return nil, fmt.Errorf("failed to insert sector: %w", err) + } else if slabID != s.slabID { + return nil, fmt.Errorf("failed to insert sector for slab %v: already exists for slab %v", s.slabID, slabID) + } + sectorIDs = append(sectorIDs, sectorID) + } + return sectorIDs, nil +} diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go new file mode 100644 index 000000000..c52417fee --- /dev/null +++ b/stores/sql/sqlite/metrics.go @@ -0,0 +1,124 @@ +package sqlite + +import ( + "context" + dsql "database/sql" + 
"encoding/hex"
+	"time"
+
+	"go.sia.tech/renterd/api"
+	"go.sia.tech/renterd/internal/sql"
+	ssql "go.sia.tech/renterd/stores/sql"
+	"lukechampine.com/frand"
+
+	"go.uber.org/zap"
+)
+
+type (
+	MetricsDatabase struct {
+		db  *sql.DB
+		log *zap.SugaredLogger
+	}
+
+	MetricsDatabaseTx struct {
+		sql.Tx
+		log *zap.SugaredLogger
+	}
+)
+
+var _ ssql.MetricsDatabaseTx = (*MetricsDatabaseTx)(nil)
+
+// NewMetricsDatabase creates a new SQLite-backed metrics database.
+func NewMetricsDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration) (*MetricsDatabase, error) {
+	store, err := sql.NewDB(db, log.Desugar(), deadlockMsgs, lqd, ltd)
+	return &MetricsDatabase{
+		db:  store,
+		log: log,
+	}, err
+}
+
+func (b *MetricsDatabase) ApplyMigration(ctx context.Context, fn func(tx sql.Tx) (bool, error)) error {
+	return applyMigration(ctx, b.db, fn)
+}
+
+func (b *MetricsDatabase) Close() error {
+	return closeDB(b.db, b.log)
+}
+
+func (b *MetricsDatabase) DB() *sql.DB {
+	return b.db
+}
+
+func (b *MetricsDatabase) CreateMigrationTable(ctx context.Context) error {
+	return createMigrationTable(ctx, b.db)
+}
+
+func (b *MetricsDatabase) Migrate(ctx context.Context) error {
+	return sql.PerformMigrations(ctx, b, migrationsFs, "metrics", sql.MetricsMigrations(ctx, migrationsFs, b.log))
+}
+
+func (b *MetricsDatabase) Transaction(ctx context.Context, fn func(tx ssql.MetricsDatabaseTx) error) error {
+	return b.db.Transaction(ctx, func(tx sql.Tx) error {
+		return fn(b.wrapTxn(tx))
+	})
+}
+
+func (b *MetricsDatabase) Version(ctx context.Context) (string, string, error) {
+	return version(ctx, b.db)
+}
+
+func (b *MetricsDatabase) wrapTxn(tx sql.Tx) *MetricsDatabaseTx {
+	return &MetricsDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))}
+}
+
+func (tx *MetricsDatabaseTx) ContractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) ([]api.ContractMetric, error) {
+	return ssql.ContractMetrics(ctx, tx, start, n,
interval, ssql.ContractMetricsQueryOpts{ContractMetricsQueryOpts: opts}) +} + +func (tx *MetricsDatabaseTx) ContractPruneMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractPruneMetricsQueryOpts) ([]api.ContractPruneMetric, error) { + return ssql.ContractPruneMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) ContractSetChurnMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetChurnMetricsQueryOpts) ([]api.ContractSetChurnMetric, error) { + return ssql.ContractSetChurnMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) ContractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) (metrics []api.ContractSetMetric, _ error) { + return ssql.ContractSetMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) { + return ssql.PerformanceMetrics(ctx, tx, start, n, interval, opts) +} + +func (tx *MetricsDatabaseTx) PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error { + return ssql.PruneMetrics(ctx, tx, metric, cutoff) +} + +func (tx *MetricsDatabaseTx) RecordContractMetric(ctx context.Context, metrics ...api.ContractMetric) error { + return ssql.RecordContractMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error { + return ssql.RecordContractPruneMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error { + return ssql.RecordContractSetChurnMetric(ctx, tx, metrics...) 
+} + +func (tx *MetricsDatabaseTx) RecordContractSetMetric(ctx context.Context, metrics ...api.ContractSetMetric) error { + return ssql.RecordContractSetMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error { + return ssql.RecordPerformanceMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { + return ssql.RecordWalletMetric(ctx, tx, metrics...) +} + +func (tx *MetricsDatabaseTx) WalletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) ([]api.WalletMetric, error) { + return ssql.WalletMetrics(ctx, tx, start, n, interval, opts) +} diff --git a/stores/migrations/sqlite/main/migration_00001_object_metadata.sql b/stores/sql/sqlite/migrations/main/migration_00001_object_metadata.sql similarity index 100% rename from stores/migrations/sqlite/main/migration_00001_object_metadata.sql rename to stores/sql/sqlite/migrations/main/migration_00001_object_metadata.sql diff --git a/stores/migrations/sqlite/main/migration_00002_prune_slabs_trigger.sql b/stores/sql/sqlite/migrations/main/migration_00002_prune_slabs_trigger.sql similarity index 100% rename from stores/migrations/sqlite/main/migration_00002_prune_slabs_trigger.sql rename to stores/sql/sqlite/migrations/main/migration_00002_prune_slabs_trigger.sql diff --git a/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql b/stores/sql/sqlite/migrations/main/migration_00003_idx_objects_size.sql similarity index 100% rename from stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql rename to stores/sql/sqlite/migrations/main/migration_00003_idx_objects_size.sql diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/sql/sqlite/migrations/main/migration_00004_prune_slabs_cascade.sql similarity index 100% rename from 
stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql rename to stores/sql/sqlite/migrations/main/migration_00004_prune_slabs_cascade.sql diff --git a/stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql b/stores/sql/sqlite/migrations/main/migration_00005_zero_size_object_health.sql similarity index 100% rename from stores/migrations/sqlite/main/migration_00005_zero_size_object_health.sql rename to stores/sql/sqlite/migrations/main/migration_00005_zero_size_object_health.sql diff --git a/stores/migrations/sqlite/main/migration_00006_idx_objects_created_at.sql b/stores/sql/sqlite/migrations/main/migration_00006_idx_objects_created_at.sql similarity index 100% rename from stores/migrations/sqlite/main/migration_00006_idx_objects_created_at.sql rename to stores/sql/sqlite/migrations/main/migration_00006_idx_objects_created_at.sql diff --git a/stores/migrations/sqlite/main/migration_00007_host_checks.sql b/stores/sql/sqlite/migrations/main/migration_00007_host_checks.sql similarity index 100% rename from stores/migrations/sqlite/main/migration_00007_host_checks.sql rename to stores/sql/sqlite/migrations/main/migration_00007_host_checks.sql diff --git a/stores/sql/sqlite/migrations/main/migration_00008_directories_1.sql b/stores/sql/sqlite/migrations/main/migration_00008_directories_1.sql new file mode 100644 index 000000000..a283f39ad --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00008_directories_1.sql @@ -0,0 +1,23 @@ +-- dbDirectory +DROP TABLE IF EXISTS `directories`; +CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); +CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); +CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); + +-- dbObject: add column and constraint +DROP TABLE IF EXISTS 
`objects_temp`; +CREATE TABLE `objects_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); + +INSERT INTO `objects_temp` (`id`, `created_at`, `db_bucket_id`, `db_directory_id`, `object_id`, `key`, `health`, `size`, `mime_type`, `etag`) +SELECT `id`, `created_at`, `db_bucket_id`, 0, `object_id`, `key`, `health`, `size`, `mime_type`, `etag` +FROM `objects`; +DROP TABLE `objects`; +ALTER TABLE `objects_temp` RENAME TO `objects`; + +CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); +CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); +CREATE INDEX `idx_objects_health` ON `objects`(`health`); +CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`); +CREATE INDEX `idx_objects_size` ON `objects`(`size`); +CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`); +CREATE INDEX `idx_objects_created_at` ON `objects`(`created_at`); diff --git a/stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql b/stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql new file mode 100644 index 000000000..ce7dc625f --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql @@ -0,0 +1 @@ +-- nothing to do \ No newline at end of file diff --git a/stores/sql/sqlite/migrations/main/migration_00009_json_settings.sql b/stores/sql/sqlite/migrations/main/migration_00009_json_settings.sql new file mode 100644 index 000000000..ce7dc625f --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00009_json_settings.sql @@ -0,0 +1 @@ +-- nothing to do \ No newline at end of file diff --git 
a/stores/sql/sqlite/migrations/main/migration_00010_webhook_headers.sql b/stores/sql/sqlite/migrations/main/migration_00010_webhook_headers.sql new file mode 100644 index 000000000..7e3d0453b --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00010_webhook_headers.sql @@ -0,0 +1 @@ +ALTER TABLE `webhooks` ADD COLUMN `headers` text DEFAULT '{}'; \ No newline at end of file diff --git a/stores/sql/sqlite/migrations/main/migration_00011_host_subnets.sql b/stores/sql/sqlite/migrations/main/migration_00011_host_subnets.sql new file mode 100644 index 000000000..4bdb4472d --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00011_host_subnets.sql @@ -0,0 +1,2 @@ +ALTER TABLE `hosts` ADD COLUMN `subnets` text NOT NULL DEFAULT ''; +UPDATE `hosts` SET last_scan = 0; \ No newline at end of file diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/sql/sqlite/migrations/main/schema.sql similarity index 94% rename from stores/migrations/sqlite/main/schema.sql rename to stores/sql/sqlite/migrations/main/schema.sql index 3fca53a3a..eadbc425c 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/sql/sqlite/migrations/main/schema.sql @@ -12,7 +12,7 @@ CREATE INDEX `idx_archived_contracts_state` ON `archived_contracts`(`state`); CREATE INDEX `idx_archived_contracts_renewed_from` ON `archived_contracts`(`renewed_from`); -- dbHost -CREATE TABLE `hosts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`public_key` blob NOT NULL UNIQUE,`settings` text,`price_table` text,`price_table_expiry` datetime,`total_scans` integer,`last_scan` integer,`last_scan_success` numeric,`second_to_last_scan_success` numeric,`scanned` numeric,`uptime` integer,`downtime` integer,`recent_downtime` integer,`recent_scan_failures` integer,`successful_interactions` real,`failed_interactions` real,`lost_sectors` integer,`last_announcement` datetime,`net_address` text); +CREATE TABLE `hosts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` 
datetime,`public_key` blob NOT NULL UNIQUE,`settings` text,`price_table` text,`price_table_expiry` datetime,`total_scans` integer,`last_scan` integer,`last_scan_success` numeric,`second_to_last_scan_success` numeric,`scanned` numeric,`uptime` integer,`downtime` integer,`recent_downtime` integer,`recent_scan_failures` integer,`successful_interactions` real,`failed_interactions` real,`lost_sectors` integer,`last_announcement` datetime,`net_address` text,`subnets` text NOT NULL DEFAULT ''); CREATE INDEX `idx_hosts_recent_scan_failures` ON `hosts`(`recent_scan_failures`); CREATE INDEX `idx_hosts_recent_downtime` ON `hosts`(`recent_downtime`); CREATE INDEX `idx_hosts_scanned` ON `hosts`(`scanned`); @@ -44,8 +44,13 @@ CREATE INDEX `idx_contract_set_contracts_db_contract_id` ON `contract_set_contra CREATE TABLE `buckets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`policy` text,`name` text NOT NULL UNIQUE); CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); +-- dbDirectory +CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); +CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); +CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); + -- dbObject -CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL,`object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`)); +CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT 
`fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); @@ -142,7 +147,7 @@ CREATE INDEX `idx_ephemeral_accounts_requires_sync` ON `ephemeral_accounts`(`req CREATE TABLE `autopilots` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`identifier` text NOT NULL UNIQUE,`config` text,`current_period` integer DEFAULT 0); -- dbWebhook -CREATE TABLE `webhooks` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`module` text NOT NULL,`event` text NOT NULL,`url` text NOT NULL); +CREATE TABLE `webhooks` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`module` text NOT NULL,`event` text NOT NULL,`url` text NOT NULL,`headers` text DEFAULT ('{}')); CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`); -- dbObjectUserMetadata diff --git a/stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql b/stores/sql/sqlite/migrations/metrics/migration_00001_idx_contracts_fcid_timestamp.sql similarity index 100% rename from stores/migrations/sqlite/metrics/migration_00001_idx_contracts_fcid_timestamp.sql rename to stores/sql/sqlite/migrations/metrics/migration_00001_idx_contracts_fcid_timestamp.sql diff --git a/stores/migrations/sqlite/metrics/schema.sql b/stores/sql/sqlite/migrations/metrics/schema.sql similarity index 100% rename from stores/migrations/sqlite/metrics/schema.sql rename to stores/sql/sqlite/migrations/metrics/schema.sql diff --git a/stores/sql/types.go b/stores/sql/types.go new file mode 100644 index 000000000..00242be2f --- /dev/null +++ b/stores/sql/types.go @@ -0,0 +1,356 @@ +package sql + +import ( + "database/sql" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + 
"math/big" + "strconv" + "strings" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/siad/modules" +) + +const ( + secretKeySize = 32 +) + +type ( + AutopilotConfig api.AutopilotConfig + BigInt big.Int + CCID modules.ConsensusChangeID + Currency types.Currency + FileContractID types.FileContractID + Hash256 types.Hash256 + BusSetting string + HostSettings rhpv2.HostSettings + PriceTable rhpv3.HostPriceTable + PublicKey types.PublicKey + SecretKey []byte + UnixTimeMS time.Time + UnixTimeNS time.Time + Unsigned64 uint64 +) + +type scannerValuer interface { + driver.Valuer + sql.Scanner +} + +var ( + _ scannerValuer = (*AutopilotConfig)(nil) + _ scannerValuer = (*BigInt)(nil) + _ scannerValuer = (*BusSetting)(nil) + _ scannerValuer = (*CCID)(nil) + _ scannerValuer = (*Currency)(nil) + _ scannerValuer = (*FileContractID)(nil) + _ scannerValuer = (*Hash256)(nil) + _ scannerValuer = (*HostSettings)(nil) + _ scannerValuer = (*PriceTable)(nil) + _ scannerValuer = (*PublicKey)(nil) + _ scannerValuer = (*SecretKey)(nil) + _ scannerValuer = (*UnixTimeMS)(nil) + _ scannerValuer = (*UnixTimeNS)(nil) + _ scannerValuer = (*Unsigned64)(nil) +) + +// Scan scan value into AutopilotConfig, implements sql.Scanner interface. +func (cfg *AutopilotConfig) Scan(value interface{}) error { + var bytes []byte + switch value := value.(type) { + case string: + bytes = []byte(value) + case []byte: + bytes = value + default: + return fmt.Errorf("failed to unmarshal AutopilotConfig value: %v %T", value, value) + } + return json.Unmarshal(bytes, cfg) +} + +// Value returns a AutopilotConfig value, implements driver.Valuer interface. +func (cfg AutopilotConfig) Value() (driver.Value, error) { + return json.Marshal(cfg) +} + +// Scan scan value into BigInt, implements sql.Scanner interface. 
+func (b *BigInt) Scan(value interface{}) error {
+	var s string
+	switch value := value.(type) {
+	case string:
+		s = value
+	case []byte:
+		s = string(value)
+	default:
+		return fmt.Errorf("failed to unmarshal BigInt value: %v %t", value, value)
+	}
+	if _, success := (*big.Int)(b).SetString(s, 10); !success {
+		return errors.New(fmt.Sprint("failed to scan BigInt value", value))
+	}
+	return nil
+}
+
+// Value returns a BigInt value, implements driver.Valuer interface.
+func (b BigInt) Value() (driver.Value, error) {
+	return (*big.Int)(&b).String(), nil
+}
+
+// Scan scan value into CCID, implements sql.Scanner interface.
+func (c *CCID) Scan(value interface{}) error {
+	switch value := value.(type) {
+	case []byte:
+		copy(c[:], value)
+	default:
+		return fmt.Errorf("failed to unmarshal CCID value: %v %t", value, value)
+	}
+	return nil
+}
+
+// Value returns a CCID value, implements driver.Valuer interface.
+func (c CCID) Value() (driver.Value, error) {
+	return c[:], nil
+}
+
+// Scan scan value into Currency, implements sql.Scanner interface.
+func (c *Currency) Scan(value interface{}) error {
+	var s string
+	switch value := value.(type) {
+	case string:
+		s = value
+	case []byte:
+		s = string(value)
+	default:
+		return fmt.Errorf("failed to unmarshal Currency value: %v %t", value, value)
+	}
+	curr, err := types.ParseCurrency(s)
+	if err != nil {
+		return err
+	}
+	*c = Currency(curr)
+	return nil
+}
+
+// Value returns a Currency value, implements driver.Valuer interface.
+func (c Currency) Value() (driver.Value, error) {
+	return types.Currency(c).ExactString(), nil
+}
+
+// Scan scan value into fileContractID, implements sql.Scanner interface.
+func (fcid *FileContractID) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal fcid value:", value)) + } + if len(bytes) != len(FileContractID{}) { + return fmt.Errorf("failed to unmarshal fcid value due to invalid number of bytes %v != %v: %v", len(bytes), len(FileContractID{}), value) + } + *fcid = *(*FileContractID)(bytes) + return nil +} + +// Value returns a fileContractID value, implements driver.Valuer interface. +func (fcid FileContractID) Value() (driver.Value, error) { + return fcid[:], nil +} + +// Scan scan value into address, implements sql.Scanner interface. +func (h *Hash256) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal Hash256 value:", value)) + } + if len(bytes) != len(Hash256{}) { + return fmt.Errorf("failed to unmarshal Hash256 value due to invalid number of bytes %v != %v: %v", len(bytes), len(Hash256{}), value) + } + *h = *(*Hash256)(bytes) + return nil +} + +// Value returns an addr value, implements driver.Valuer interface. +func (h Hash256) Value() (driver.Value, error) { + return h[:], nil +} + +// Scan scan value into HostSettings, implements sql.Scanner interface. +func (hs *HostSettings) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal Settings value:", value)) + } + return json.Unmarshal(bytes, hs) +} + +// Value returns a HostSettings value, implements driver.Valuer interface. +func (hs HostSettings) Value() (driver.Value, error) { + return json.Marshal(hs) +} + +// Scan scan value into PriceTable, implements sql.Scanner interface. 
+func (pt *PriceTable) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal PriceTable value:", value)) + } + return json.Unmarshal(bytes, pt) +} + +// Value returns a PriceTable value, implements driver.Valuer interface. +func (pt PriceTable) Value() (driver.Value, error) { + return json.Marshal(pt) +} + +// Scan scan value into publicKey, implements sql.Scanner interface. +func (pk *PublicKey) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal publicKey value:", value)) + } + if len(bytes) != len(types.PublicKey{}) { + return fmt.Errorf("failed to unmarshal publicKey value due invalid number of bytes %v != %v: %v", len(bytes), len(PublicKey{}), value) + } + *pk = *(*PublicKey)(bytes) + return nil +} + +// Value returns a publicKey value, implements driver.Valuer interface. +func (pk PublicKey) Value() (driver.Value, error) { + return pk[:], nil +} + +// String implements fmt.Stringer to prevent the key from getting leaked in +// logs. +func (k SecretKey) String() string { + return "*****" +} + +// Scan scans value into key, implements sql.Scanner interface. +func (k *SecretKey) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal secretKey value:", value)) + } else if len(bytes) != secretKeySize { + return fmt.Errorf("failed to unmarshal secretKey value due to invalid number of bytes %v != %v: %v", len(bytes), secretKeySize, value) + } + *k = append(SecretKey{}, SecretKey(bytes)...) + return nil +} + +// Value returns an key value, implements driver.Valuer interface. +func (k SecretKey) Value() (driver.Value, error) { + return []byte(k), nil +} + +// String implements fmt.Stringer to prevent "s3authentication" settings from +// getting leaked. 
+func (s BusSetting) String() string { + if strings.Contains(string(s), "v4Keypairs") { + return "*****" + } + return string(s) +} + +// Scan scans value into the BusSetting +func (s *BusSetting) Scan(value interface{}) error { + switch value := value.(type) { + case string: + *s = BusSetting(value) + case []byte: + *s = BusSetting(value) + default: + return fmt.Errorf("failed to unmarshal BusSetting value from type %t", value) + } + return nil +} + +// Value returns a BusSetting value, implements driver.Valuer interface. +func (s BusSetting) Value() (driver.Value, error) { + return string(s), nil +} + +// Scan scan value into unixTimeMS, implements sql.Scanner interface. +func (u *UnixTimeMS) Scan(value interface{}) error { + var msec int64 + var err error + switch value := value.(type) { + case int64: + msec = value + case []uint8: + msec, err = strconv.ParseInt(string(value), 10, 64) + if err != nil { + return fmt.Errorf("failed to unmarshal unixTimeMS value: %v %T", value, value) + } + default: + return fmt.Errorf("failed to unmarshal unixTimeMS value: %v %T", value, value) + } + + *u = UnixTimeMS(time.UnixMilli(msec)) + return nil +} + +// Value returns a int64 value representing a unix timestamp in milliseconds, +// implements driver.Valuer interface. +func (u UnixTimeMS) Value() (driver.Value, error) { + return time.Time(u).UnixMilli(), nil +} + +// Scan scan value into UnixTimeNS, implements sql.Scanner interface. 
+func (u *UnixTimeNS) Scan(value interface{}) error { + var nsec int64 + var err error + switch value := value.(type) { + case int64: + nsec = value + case []uint8: + nsec, err = strconv.ParseInt(string(value), 10, 64) + if err != nil { + return fmt.Errorf("failed to unmarshal UnixTimeNS value: %v %T", value, value) + } + default: + return fmt.Errorf("failed to unmarshal UnixTimeNS value: %v %T", value, value) + } + + if nsec == 0 { + *u = UnixTimeNS{} + } else { + *u = UnixTimeNS(time.Unix(0, nsec)) + } + return nil +} + +// Value returns a int64 value representing a unix timestamp in milliseconds, +// implements driver.Valuer interface. +func (u UnixTimeNS) Value() (driver.Value, error) { + return time.Time(u).UnixNano(), nil +} + +// Scan scan value into Unsigned64, implements sql.Scanner interface. +func (u *Unsigned64) Scan(value interface{}) error { + var n int64 + var err error + switch value := value.(type) { + case int64: + n = value + case []uint8: + n, err = strconv.ParseInt(string(value), 10, 64) + if err != nil { + return fmt.Errorf("failed to unmarshal Unsigned64 value: %v %T", value, value) + } + default: + return fmt.Errorf("failed to unmarshal Unsigned64 value: %v %T", value, value) + } + + *u = Unsigned64(n) + return nil +} + +// Value returns an Unsigned64 value, implements driver.Valuer interface. 
+func (u Unsigned64) Value() (driver.Value, error) { + return int64(u), nil +} diff --git a/stores/sql_test.go b/stores/sql_test.go index 18b7f5609..228165370 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -3,6 +3,7 @@ package stores import ( "bytes" "context" + dsql "database/sql" "encoding/hex" "errors" "fmt" @@ -17,7 +18,12 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/config" + isql "go.sia.tech/renterd/internal/sql" "go.sia.tech/renterd/object" + sql "go.sia.tech/renterd/stores/sql" + "go.sia.tech/renterd/stores/sql/mysql" + "go.sia.tech/renterd/stores/sql/sqlite" "go.sia.tech/siad/modules" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -43,19 +49,12 @@ var ( ) type testSQLStore struct { - t *testing.T + cfg testSQLStoreConfig + t *testing.T *SQLStore - - dbName string - dbMetricsName string - dir string - ccid modules.ConsensusChangeID } type testSQLStoreConfig struct { - dbURI string - dbUser string - dbPassword string dbName string dbMetricsName string dir string @@ -66,78 +65,114 @@ type testSQLStoreConfig struct { var defaultTestSQLStoreConfig = testSQLStoreConfig{} +func randomDBName() string { + return "db" + hex.EncodeToString(frand.Bytes(16)) +} + +func (cfg *testSQLStoreConfig) dbConnections() (gorm.Dialector, sql.MetricsDatabase, error) { + var connMain gorm.Dialector + var dbm *dsql.DB + var dbMetrics sql.MetricsDatabase + var err error + if mysqlCfg := config.MySQLConfigFromEnv(); mysqlCfg.URI != "" { + // create MySQL connections if URI is set + + // sanity check config + if cfg.persistent { + return nil, nil, errors.New("invalid store config, can't use both persistent and dbURI") + } + + // use db names from config if not set + if mysqlCfg.Database == "" { + mysqlCfg.Database = cfg.dbName + } + if mysqlCfg.MetricsDatabase == "" { + mysqlCfg.MetricsDatabase = cfg.dbMetricsName + } + + // use a tmp connection to precreate the two databases + if tmpDB, err := 
gorm.Open(NewMySQLConnection(mysqlCfg.User, mysqlCfg.Password, mysqlCfg.URI, "")); err != nil { + return nil, nil, err + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", mysqlCfg.Database)).Error; err != nil { + return nil, nil, err + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", mysqlCfg.MetricsDatabase)).Error; err != nil { + return nil, nil, err + } + + connMain = NewMySQLConnection(mysqlCfg.User, mysqlCfg.Password, mysqlCfg.URI, mysqlCfg.Database) + dbm, err = mysql.Open(mysqlCfg.User, mysqlCfg.Password, mysqlCfg.URI, mysqlCfg.MetricsDatabase) + if err != nil { + return nil, nil, fmt.Errorf("failed to open MySQL metrics database: %w", err) + } + dbMetrics, err = mysql.NewMetricsDatabase(dbm, zap.NewNop().Sugar(), 100*time.Millisecond, 100*time.Millisecond) + } else if cfg.persistent { + // create SQL connections if we want a persistent store + connMain = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) + dbm, err = sqlite.Open(filepath.Join(cfg.dir, "metrics.sqlite")) + if err != nil { + return nil, nil, fmt.Errorf("failed to open SQLite metrics database: %w", err) + } + dbMetrics, err = sqlite.NewMetricsDatabase(dbm, zap.NewNop().Sugar(), 100*time.Millisecond, 100*time.Millisecond) + } else { + // otherwise return ephemeral connections + connMain = NewEphemeralSQLiteConnection(cfg.dbName) + dbm, err = sqlite.OpenEphemeral(cfg.dbMetricsName) + if err != nil { + return nil, nil, fmt.Errorf("failed to open ephemeral SQLite metrics database: %w", err) + } + dbMetrics, err = sqlite.NewMetricsDatabase(dbm, zap.NewNop().Sugar(), 100*time.Millisecond, 100*time.Millisecond) + } + if err != nil { + return nil, nil, fmt.Errorf("failed to create metrics database: %w", err) + } + return connMain, dbMetrics, nil +} + // newTestSQLStore creates a new SQLStore for testing. 
func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { t.Helper() - dir := cfg.dir - if dir == "" { - dir = t.TempDir() - } - dbURI, dbUser, dbPassword, dbName := DBConfigFromEnv() - if dbURI == "" { - dbURI = cfg.dbURI - } - if cfg.persistent && dbURI != "" { - t.Fatal("invalid store config, can't use both persistent and dbURI") + // default dir to tmp dir + if cfg.dir == "" { + cfg.dir = t.TempDir() } - if dbUser == "" { - dbUser = cfg.dbUser - } - if dbPassword == "" { - dbPassword = cfg.dbPassword - } - if dbName == "" { - if cfg.dbName != "" { - dbName = cfg.dbName - } else { - dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db - } + + // default db names to random strings if not set + if cfg.dbName == "" { + cfg.dbName = randomDBName() } - dbMetricsName := cfg.dbMetricsName - if dbMetricsName == "" { - dbMetricsName = hex.EncodeToString(frand.Bytes(32)) // random name for metrics db + if cfg.dbMetricsName == "" { + cfg.dbMetricsName = randomDBName() } - var conn, connMetrics gorm.Dialector - if dbURI != "" { - if tmpDB, err := gorm.Open(NewMySQLConnection(dbUser, dbPassword, dbURI, "")); err != nil { - t.Fatal(err) - } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbName)).Error; err != nil { - t.Fatal(err) - } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbMetricsName)).Error; err != nil { - t.Fatal(err) - } - - conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName) - connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName) - } else if cfg.persistent { - conn = NewSQLiteConnection(filepath.Join(dir, "db.sqlite")) - connMetrics = NewSQLiteConnection(filepath.Join(dir, "metrics.sqlite")) - } else { - conn = NewEphemeralSQLiteConnection(dbName) - connMetrics = NewEphemeralSQLiteConnection(dbMetricsName) + // create db connections + conn, dbMetrics, err := cfg.dbConnections() + if err != nil { + t.Fatal("failed to create db connections", err) 
} walletAddrs := types.Address(frand.Entropy256()) alerts := alerts.WithOrigin(alerts.NewManager(), "test") - sqlStore, ccid, err := NewSQLStore(Config{ + sqlStore, _, err := NewSQLStore(Config{ Conn: conn, - ConnMetrics: connMetrics, Alerts: alerts, - PartialSlabDir: dir, + DBMetrics: dbMetrics, + PartialSlabDir: cfg.dir, Migrate: !cfg.skipMigrate, AnnouncementMaxAge: time.Hour, PersistInterval: time.Second, WalletAddress: walletAddrs, SlabBufferCompletionThreshold: 0, Logger: zap.NewNop().Sugar(), + LongQueryDuration: 100 * time.Millisecond, + LongTxDuration: 100 * time.Millisecond, GormLogger: newTestLogger(), RetryTransactionIntervals: []time.Duration{50 * time.Millisecond, 100 * time.Millisecond, 200 * time.Millisecond}, }) if err != nil { t.Fatal("failed to create SQLStore", err) } + if !cfg.skipContractSet { err = sqlStore.SetContractSet(context.Background(), testContractSet, []types.FileContractID{}) if err != nil { @@ -145,13 +180,34 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { } } return &testSQLStore{ - SQLStore: sqlStore, - dbName: dbName, - dbMetricsName: dbMetricsName, - dir: dir, - ccid: ccid, - t: t, + cfg: cfg, + t: t, + SQLStore: sqlStore, + } +} + +func (s *testSQLStore) DB() *isql.DB { + switch db := s.bMain.(type) { + case *sqlite.MainDatabase: + return db.DB() + case *mysql.MainDatabase: + return db.DB() + default: + s.t.Fatal("unknown db type", db) + } + panic("unreachable") +} + +func (s *testSQLStore) DBMetrics() *isql.DB { + switch db := s.bMetrics.(type) { + case *sqlite.MetricsDatabase: + return db.DB() + case *mysql.MetricsDatabase: + return db.DB() + default: + s.t.Fatal("unknown db type", db) } + panic("unreachable") } func (s *testSQLStore) Close() error { @@ -175,10 +231,7 @@ func (s *testSQLStore) DefaultBucketID() uint { func (s *testSQLStore) Reopen() *testSQLStore { s.t.Helper() - cfg := defaultTestSQLStoreConfig - cfg.dir = s.dir - cfg.dbName = s.dbName - cfg.dbMetricsName = s.dbMetricsName + 
cfg := s.cfg cfg.skipContractSet = true cfg.skipMigrate = true return newTestSQLStore(s.t, cfg) @@ -470,8 +523,18 @@ func TestRetryTransaction(t *testing.T) { t.Fatal("unexpected logs", cmp.Diff(got, want)) } - // retry transaction that aborts, assert no logs were added - ss.retryTransaction(context.Background(), func(tx *gorm.DB) error { return context.Canceled }) + // retry transaction with cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() + ss.retryTransaction(ctx, func(tx *gorm.DB) error { return nil }) + if len(observedLogs.All()) != len(want) { + t.Fatal("expected no logs") + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Microsecond) + defer cancel() + time.Sleep(time.Millisecond) + ss.retryTransaction(ctx, func(tx *gorm.DB) error { return nil }) if len(observedLogs.All()) != len(want) { t.Fatal("expected no logs") } diff --git a/stores/types.go b/stores/types.go index a2628d834..7020f49e6 100644 --- a/stores/types.go +++ b/stores/types.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "math/big" "strconv" "strings" "time" @@ -32,8 +31,6 @@ type ( publicKey types.PublicKey hostSettings rhpv2.HostSettings hostPriceTable rhpv3.HostPriceTable - balance big.Int - unsigned64 uint64 // used for storing large uint64 values in sqlite secretKey []byte setting string ) @@ -230,27 +227,6 @@ func (hs hostPriceTable) Value() (driver.Value, error) { return json.Marshal(hs) } -func (balance) GormDataType() string { - return "string" -} - -// Scan scan value into balance, implements sql.Scanner interface. 
-func (hs *balance) Scan(value interface{}) error { - var s string - switch value := value.(type) { - case string: - s = value - case []byte: - s = string(value) - default: - return fmt.Errorf("failed to unmarshal balance value: %v %t", value, value) - } - if _, success := (*big.Int)(hs).SetString(s, 10); !success { - return errors.New(fmt.Sprint("failed to scan balance value", value)) - } - return nil -} - // SQLiteTimestampFormats were taken from github.com/mattn/go-sqlite3 and are // used when parsing a string to a date var SQLiteTimestampFormats = []string{ @@ -265,17 +241,12 @@ var SQLiteTimestampFormats = []string{ "2006-01-02", } -// Value returns a balance value, implements driver.Valuer interface. -func (hs balance) Value() (driver.Value, error) { - return (*big.Int)(&hs).String(), nil -} - // GormDataType implements gorm.GormDataTypeInterface. func (datetime) GormDataType() string { return "string" } -// Scan scan value into balance, implements sql.Scanner interface. +// Scan scan value into datetime, implements sql.Scanner interface. func (dt *datetime) Scan(value interface{}) error { var s string switch value := value.(type) { @@ -318,7 +289,7 @@ func (unixTimeMS) GormDataType() string { return "BIGINT" } -// Scan scan value into balance, implements sql.Scanner interface. +// Scan scan value into unixTimeMS, implements sql.Scanner interface. func (u *unixTimeMS) Scan(value interface{}) error { var msec int64 var err error @@ -344,36 +315,6 @@ func (u unixTimeMS) Value() (driver.Value, error) { return time.Time(u).UnixMilli(), nil } -// GormDataType implements gorm.GormDataTypeInterface. -func (unsigned64) GormDataType() string { - return "BIGINT" -} - -// Scan scan value into balance, implements sql.Scanner interface. 
-func (u *unsigned64) Scan(value interface{}) error { - var n int64 - var err error - switch value := value.(type) { - case int64: - n = value - case []uint8: - n, err = strconv.ParseInt(string(value), 10, 64) - if err != nil { - return fmt.Errorf("failed to unmarshal unsigned64 value: %v %T", value, value) - } - default: - return fmt.Errorf("failed to unmarshal unsigned64 value: %v %T", value, value) - } - - *u = unsigned64(n) - return nil -} - -// Value returns a datetime value, implements driver.Valuer interface. -func (u unsigned64) Value() (driver.Value, error) { - return int64(u), nil -} - func (bCurrency) GormDataType() string { return "bytes" } diff --git a/stores/webhooks.go b/stores/webhooks.go index 4db325698..02516c419 100644 --- a/stores/webhooks.go +++ b/stores/webhooks.go @@ -3,62 +3,26 @@ package stores import ( "context" + sql "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" - "gorm.io/gorm" - "gorm.io/gorm/clause" ) -type ( - dbWebhook struct { - Model - - Module string `gorm:"uniqueIndex:idx_module_event_url;NOT NULL;size:255"` - Event string `gorm:"uniqueIndex:idx_module_event_url;NOT NULL;size:255"` - URL string `gorm:"uniqueIndex:idx_module_event_url;NOT NULL;size:255"` - } -) - -func (dbWebhook) TableName() string { - return "webhooks" -} - -func (s *SQLStore) DeleteWebhook(ctx context.Context, wb webhooks.Webhook) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - res := tx.Exec("DELETE FROM webhooks WHERE module = ? AND event = ? 
AND url = ?", - wb.Module, wb.Event, wb.URL) - if res.Error != nil { - return res.Error - } else if res.RowsAffected == 0 { - return gorm.ErrRecordNotFound - } - return nil +func (s *SQLStore) AddWebhook(ctx context.Context, wh webhooks.Webhook) error { + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.AddWebhook(ctx, wh) }) } -func (s *SQLStore) AddWebhook(ctx context.Context, wb webhooks.Webhook) error { - return s.retryTransaction(ctx, func(tx *gorm.DB) error { - return tx.Clauses(clause.OnConflict{ - DoNothing: true, - }).Create(&dbWebhook{ - Module: wb.Module, - Event: wb.Event, - URL: wb.URL, - }).Error +func (s *SQLStore) DeleteWebhook(ctx context.Context, wh webhooks.Webhook) error { + return s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + return tx.DeleteWebhook(ctx, wh) }) } -func (s *SQLStore) Webhooks(ctx context.Context) ([]webhooks.Webhook, error) { - var dbWebhooks []dbWebhook - if err := s.db.WithContext(ctx).Find(&dbWebhooks).Error; err != nil { - return nil, err - } - var whs []webhooks.Webhook - for _, wb := range dbWebhooks { - whs = append(whs, webhooks.Webhook{ - Module: wb.Module, - Event: wb.Event, - URL: wb.URL, - }) - } - return whs, nil +func (s *SQLStore) Webhooks(ctx context.Context) (whs []webhooks.Webhook, err error) { + err = s.bMain.Transaction(ctx, func(tx sql.DatabaseTx) error { + whs, err = tx.Webhooks(ctx) + return err + }) + return } diff --git a/stores/webhooks_test.go b/stores/webhooks_test.go index b306eef2c..f38dc0756 100644 --- a/stores/webhooks_test.go +++ b/stores/webhooks_test.go @@ -16,11 +16,17 @@ func TestWebhooks(t *testing.T) { Module: "foo", Event: "bar", URL: "http://example.com", + Headers: map[string]string{ + "foo1": "bar1", + }, } wh2 := webhooks.Webhook{ Module: "foo2", Event: "bar2", URL: "http://example2.com", + Headers: map[string]string{ + "foo2": "bar2", + }, } // Add hook. 
diff --git a/webhooks/webhooks.go b/webhooks/webhooks.go index 665f8a2c4..20bf94381 100644 --- a/webhooks/webhooks.go +++ b/webhooks/webhooks.go @@ -3,6 +3,7 @@ package webhooks import ( "bytes" "context" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -29,6 +30,14 @@ type ( } ) +type HeaderOption func(headers map[string]string) + +func WithBasicAuth(username, password string) HeaderOption { + return func(headers map[string]string) { + headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(username+":"+password)) + } +} + type NoopBroadcaster struct{} func (NoopBroadcaster) BroadcastAction(_ context.Context, _ Event) error { return nil } @@ -40,9 +49,10 @@ const ( type ( Webhook struct { - Module string `json:"module"` - Event string `json:"event"` - URL string `json:"url"` + Module string `json:"module"` + Event string `json:"event"` + URL string `json:"url"` + Headers map[string]string `json:"headers,omitempty"` } WebhookQueueInfo struct { @@ -72,9 +82,10 @@ type Manager struct { } type eventQueue struct { - ctx context.Context - logger *zap.SugaredLogger - url string + ctx context.Context + logger *zap.SugaredLogger + headers map[string]string + url string mu sync.Mutex isDequeueing bool @@ -93,9 +104,10 @@ func (m *Manager) BroadcastAction(_ context.Context, event Event) error { queue, exists := m.queues[hook.URL] if !exists { queue = &eventQueue{ - ctx: m.shutdownCtx, - logger: m.logger, - url: hook.URL, + ctx: m.shutdownCtx, + logger: m.logger, + headers: hook.Headers, + url: hook.URL, } m.queues[hook.URL] = queue } @@ -162,7 +174,7 @@ func (m *Manager) Register(ctx context.Context, wh Webhook) error { defer cancel() // Test URL. 
- err := sendEvent(ctx, wh.URL, Event{ + err := sendEvent(ctx, wh.URL, wh.Headers, Event{ Event: WebhookEventPing, }) if err != nil { @@ -195,7 +207,7 @@ func (q *eventQueue) dequeue() { q.events = q.events[1:] q.mu.Unlock() - err := sendEvent(q.ctx, q.url, next) + err := sendEvent(q.ctx, q.url, q.headers, next) if err != nil { q.logger.Errorf("failed to send Webhook event %v to %v: %v", next.String(), q.url, err) } @@ -235,7 +247,7 @@ func NewManager(logger *zap.SugaredLogger, store WebhookStore) (*Manager, error) return m, nil } -func sendEvent(ctx context.Context, url string, action Event) error { +func sendEvent(ctx context.Context, url string, headers map[string]string, action Event) error { body, err := json.Marshal(action) if err != nil { return err @@ -245,6 +257,9 @@ func sendEvent(ctx context.Context, url string, action Event) error { if err != nil { return err } + for k, v := range headers { + req.Header.Set(k, v) + } defer io.ReadAll(req.Body) // always drain body resp, err := http.DefaultClient.Do(req) diff --git a/worker/client/client.go b/worker/client/client.go index 71fd200ad..c1ab8a70e 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -16,6 +16,7 @@ import ( "go.sia.tech/jape" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" + "go.sia.tech/renterd/webhooks" ) // A Client provides methods for interacting with a worker. @@ -268,6 +269,12 @@ func (c *Client) UploadStats() (resp api.UploadStatsResponse, err error) { return } +// RegisterEvent register an event. 
+func (c *Client) RegisterEvent(ctx context.Context, e webhooks.Event) (err error) { + err = c.c.WithContext(ctx).POST("/events", e, nil) + return +} + func (c *Client) object(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (_ io.ReadCloser, _ http.Header, err error) { values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) diff --git a/worker/client/rhp.go b/worker/client/rhp.go index ec7e10b3d..d1fb2d9e8 100644 --- a/worker/client/rhp.go +++ b/worker/client/rhp.go @@ -79,13 +79,14 @@ func (c *Client) RHPPruneContract(ctx context.Context, contractID types.FileCont } // RHPRenew renews an existing contract with a host. -func (c *Client) RHPRenew(ctx context.Context, contractID types.FileContractID, endHeight uint64, hostKey types.PublicKey, siamuxAddr string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral types.Currency, expectedStorage, windowSize uint64) (resp api.RHPRenewResponse, err error) { +func (c *Client) RHPRenew(ctx context.Context, contractID types.FileContractID, endHeight uint64, hostKey types.PublicKey, siamuxAddr string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedStorage, windowSize uint64) (resp api.RHPRenewResponse, err error) { req := api.RHPRenewRequest{ ContractID: contractID, EndHeight: endHeight, ExpectedNewStorage: expectedStorage, HostAddress: hostAddress, HostKey: hostKey, + MaxFundAmount: maxFundAmount, MinNewCollateral: minNewCollateral, RenterAddress: renterAddress, RenterFunds: renterFunds, diff --git a/worker/host.go b/worker/host.go index f092534a0..c4c84be21 100644 --- a/worker/host.go +++ b/worker/host.go @@ -15,6 +15,10 @@ import ( "go.uber.org/zap" ) +var ( + errFailedToCreatePayment = errors.New("failed to create payment") +) + type ( Host interface { PublicKey() types.PublicKey @@ -28,7 +32,7 @@ type ( FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error 
SyncAccount(ctx context.Context, rev *types.FileContractRevision) error - RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) + RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) } HostManager interface { @@ -106,11 +110,14 @@ func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 return err } - var refund types.Currency payment := rhpv3.PayByEphemeralAccount(h.acc.id, cost, pt.HostBlockHeight+defaultWithdrawalExpiryBlocks, h.accountKey) - cost, refund, err = RPCReadSector(ctx, t, w, hpt, &payment, offset, length, root) + cost, refund, err := RPCReadSector(ctx, t, w, hpt, &payment, offset, length, root) + if err != nil { + return err + } + amount = cost.Sub(refund) - return err + return nil }) return }) @@ -136,7 +143,7 @@ func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, secto } payment, ok := rhpv3.PayByContract(&rev, expectedCost, h.acc.id, h.renterKey) if !ok { - return errors.New("failed to create payment") + return errFailedToCreatePayment } var cost types.Currency @@ -153,7 +160,7 @@ func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, secto return nil } -func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { +func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { // Try to get a valid pricetable. 
ptCtx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -169,21 +176,22 @@ func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rh var rev rhpv2.ContractRevision var txnSet []types.Transaction var renewErr error + var fundAmount types.Currency err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { // NOTE: to avoid an edge case where the contract is drained and can // therefore not be used to pay for the revision, we simply don't pay // for it. _, err = RPCLatestRevision(ctx, t, h.fcid, func(revision *types.FileContractRevision) (rhpv3.HostPriceTable, rhpv3.PaymentMethod, error) { // Renew contract. - rev, txnSet, contractPrice, renewErr = RPCRenew(ctx, rrr, h.bus, t, pt, *revision, h.renterKey, h.logger) + rev, txnSet, contractPrice, fundAmount, renewErr = RPCRenew(ctx, rrr, h.bus, t, pt, *revision, h.renterKey, h.logger) return rhpv3.HostPriceTable{}, nil, nil }) return err }) if err != nil { - return rhpv2.ContractRevision{}, nil, contractPrice, err + return rhpv2.ContractRevision{}, nil, contractPrice, fundAmount, err } - return rev, txnSet, contractPrice, renewErr + return rev, txnSet, contractPrice, fundAmount, renewErr } func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt api.HostPriceTable, err error) { @@ -191,14 +199,6 @@ func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevis fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt api.HostPriceTable, err error) { err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { hpt, err = RPCPriceTable(ctx, t, paymentFn) - h.bus.RecordPriceTables(ctx, []api.HostPriceTableUpdate{ - { - HostKey: h.hk, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, - }, - }) return }) return diff --git a/worker/host_test.go b/worker/host_test.go index 
cb88f1748..e329e4b90 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -118,8 +118,8 @@ func (h *testHost) FundAccount(ctx context.Context, balance types.Currency, rev return nil } -func (h *testHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { - return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, nil +func (h *testHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, nil } func (h *testHost) SyncAccount(ctx context.Context, rev *types.FileContractRevision) error { diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 897d96cdb..192f4c169 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -85,6 +85,7 @@ type busMock struct { *syncerMock *walletMock *webhookBroadcasterMock + *webhookStoreMock } func newBusMock(cs *contractStoreMock, hs *hostStoreMock, os *objectStoreMock) *busMock { @@ -673,7 +674,7 @@ func (*walletMock) WalletPrepareForm(context.Context, types.Address, types.Publi return nil, nil } -func (*walletMock) WalletPrepareRenew(context.Context, types.FileContractRevision, types.Address, types.Address, types.PrivateKey, types.Currency, types.Currency, rhpv3.HostPriceTable, uint64, uint64, uint64) (api.WalletPrepareRenewResponse, error) { +func (*walletMock) WalletPrepareRenew(context.Context, types.FileContractRevision, types.Address, types.Address, types.PrivateKey, types.Currency, types.Currency, types.Currency, rhpv3.HostPriceTable, uint64, uint64, uint64) (api.WalletPrepareRenewResponse, error) { return api.WalletPrepareRenewResponse{}, nil } @@ -688,3 +689,11 @@ type webhookBroadcasterMock struct{} func (*webhookBroadcasterMock) BroadcastAction(context.Context, webhooks.Event) error { return nil } + +var _ WebhookStore = (*webhookStoreMock)(nil) + 
+type webhookStoreMock struct{} + +func (*webhookStoreMock) RegisterWebhook(ctx context.Context, webhook webhooks.Webhook) error { + return nil +} diff --git a/worker/net.go b/worker/net.go index f751be9c9..8401062ab 100644 --- a/worker/net.go +++ b/worker/net.go @@ -2,40 +2,9 @@ package worker import ( "context" - "fmt" "net" ) -var privateSubnets []*net.IPNet - -func init() { - for _, subnet := range []string{ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - "100.64.0.0/10", - } { - _, subnet, err := net.ParseCIDR(subnet) - if err != nil { - panic(fmt.Sprintf("failed to parse subnet: %v", err)) - } - privateSubnets = append(privateSubnets, subnet) - } -} - -func isPrivateIP(addr net.IP) bool { - if addr.IsLoopback() || addr.IsLinkLocalUnicast() || addr.IsLinkLocalMulticast() { - return true - } - - for _, block := range privateSubnets { - if block.Contains(addr) { - return true - } - } - return false -} - func dial(ctx context.Context, hostIP string) (net.Conn, error) { conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", hostIP) return conn, err diff --git a/worker/pricetables.go b/worker/pricetables.go index c4c693e0a..592037146 100644 --- a/worker/pricetables.go +++ b/worker/pricetables.go @@ -172,9 +172,22 @@ func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) // otherwise fetch it h := p.hm.Host(p.hk, types.FileContractID{}, host.Settings.SiamuxAddr()) hpt, err = h.FetchPriceTable(ctx, rev) + + // record it in the background + go func(hpt api.HostPriceTable, success bool) { + p.hs.RecordPriceTables(context.Background(), []api.HostPriceTableUpdate{ + { + HostKey: p.hk, + Success: success, + Timestamp: time.Now(), + PriceTable: hpt, + }, + }) + }(hpt, isSuccessfulInteraction(err)) + + // handle error after recording if err != nil { return api.HostPriceTable{}, fmt.Errorf("failed to update pricetable, err %v", err) } - return } diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 1a6bd3cfd..c0e01b1b4 100644 --- 
a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -14,6 +14,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/build" "go.sia.tech/siad/crypto" "lukechampine.com/frand" @@ -22,6 +23,11 @@ import ( const ( // minMessageSize is the minimum size of an RPC message minMessageSize = 4096 + + // maxMerkleProofResponseSize caps the response message size to a generous + // value of 100 MB worth of roots. This is approximately double the size of + // what we have observed on the live network for 5TB+ contracts to be safe. + maxMerkleProofResponseSize = 100 * 1 << 20 // 100 MB ) var ( @@ -85,9 +91,12 @@ func (hes HostErrorSet) Error() string { return "\n" + strings.Join(strs, "\n") } -func wrapErr(err *error, fnName string) { +func wrapErr(ctx context.Context, fnName string, err *error) { if *err != nil { *err = fmt.Errorf("%s: %w", fnName, *err) + if cause := context.Cause(ctx); cause != nil && !utils.IsErr(*err, cause) { + *err = fmt.Errorf("%w; %w", cause, *err) + } } } @@ -133,7 +142,7 @@ func updateRevisionOutputs(rev *types.FileContractRevision, cost, collateral typ // RPCSettings calls the Settings RPC, returning the host's reported settings. func RPCSettings(ctx context.Context, t *rhpv2.Transport) (settings rhpv2.HostSettings, err error) { - defer wrapErr(&err, "Settings") + defer wrapErr(ctx, "Settings", &err) var resp rhpv2.RPCSettingsResponse if err := t.Call(rhpv2.RPCSettingsID, nil, &resp); err != nil { @@ -147,7 +156,7 @@ func RPCSettings(ctx context.Context, t *rhpv2.Transport) (settings rhpv2.HostSe // RPCFormContract forms a contract with a host. 
func RPCFormContract(ctx context.Context, t *rhpv2.Transport, renterKey types.PrivateKey, txnSet []types.Transaction) (_ rhpv2.ContractRevision, _ []types.Transaction, err error) { - defer wrapErr(&err, "FormContract") + defer wrapErr(ctx, "FormContract", &err) // strip our signatures before sending parents, txn := txnSet[:len(txnSet)-1], txnSet[len(txnSet)-1] @@ -424,17 +433,15 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi } cost, _ = rpcCost.Total() - // calculate the response size - proofSize := rhpv2.DiffProofSize(actions, numSectors) - responseSize := (proofSize + 1) * crypto.HashSize - - // TODO: remove once the host network is updated - if build.VersionCmp(settings.Version, "1.6.0") < 0 { - if responseSize < minMessageSize { - responseSize = minMessageSize - } - cost = settings.BaseRPCPrice.Add(settings.DownloadBandwidthPrice.Mul64(responseSize)) - cost = cost.Mul64(2) // generous leeway + // NOTE: we currently overpay hosts by quite a large margin (~10x) + // to ensure we cover both 1.5.9 and pre v0.2.1 hosts. 
+ // + // TODO: remove once host network is updated, or once we include the + // host release in the scoring and stop using old hosts + proofSize := (128 + uint64(len(actions))) * crypto.HashSize + compatCost := settings.BaseRPCPrice.Add(settings.DownloadBandwidthPrice.Mul64(proofSize)) + if cost.Cmp(compatCost) < 0 { + cost = compatCost } if rev.RenterFunds().Cmp(cost) < 0 { @@ -470,7 +477,7 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi var merkleResp rhpv2.RPCWriteMerkleProof if err := t.WriteRequest(rhpv2.RPCWriteID, wReq); err != nil { return err - } else if err := t.ReadResponse(&merkleResp, minMessageSize+responseSize); err != nil { + } else if err := t.ReadResponse(&merkleResp, maxMerkleProofResponseSize); err != nil { err := fmt.Errorf("couldn't read Merkle proof response, err: %v", err) logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) return err @@ -556,12 +563,11 @@ func (w *worker) fetchContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevis // calculate the cost cost, _ := settings.RPCSectorRootsCost(offset, n).Total() - // calculate the response size - proofSize := rhpv2.RangeProofSize(numsectors, offset, offset+n) - responseSize := (proofSize + n) * crypto.HashSize - // TODO: remove once host network is updated if build.VersionCmp(settings.Version, "1.6.0") < 0 { + // calculate the response size + proofSize := rhpv2.RangeProofSize(numsectors, offset, offset+n) + responseSize := (proofSize + n) * crypto.HashSize if responseSize < minMessageSize { responseSize = minMessageSize } @@ -602,7 +608,7 @@ func (w *worker) fetchContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevis var rootsResp rhpv2.RPCSectorRootsResponse if err := t.WriteRequest(rhpv2.RPCSectorRootsID, req); err != nil { return nil, err - } else if err := t.ReadResponse(&rootsResp, minMessageSize+responseSize); err != nil { + } else if err := t.ReadResponse(&rootsResp, maxMerkleProofResponseSize); err != 
nil { return nil, fmt.Errorf("couldn't read sector roots response: %w", err) } @@ -654,6 +660,12 @@ func (w *worker) withTransportV2(ctx context.Context, hostKey types.PublicKey, h return err } defer t.Close() + + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic (withTransportV2): %v", r) + } + }() return fn(t) } diff --git a/worker/rhpv3.go b/worker/rhpv3.go index dc483c340..4d7518d2e 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -52,6 +52,9 @@ var ( // errTransport is used to wrap rpc errors caused by the transport. errTransport = errors.New("transport error") + // errDialTransport is returned when the worker could not dial the host. + errDialTransport = errors.New("could not dial transport") + // errBalanceInsufficient occurs when a withdrawal failed because the // account balance was insufficient. errBalanceInsufficient = errors.New("ephemeral account balance was insufficient") @@ -175,7 +178,7 @@ func (t *transportV3) DialStream(ctx context.Context) (*streamV3, error) { newTransport, err := dialTransport(ctx, t.siamuxAddr, t.hostKey) if err != nil { t.mu.Unlock() - return nil, fmt.Errorf("DialStream: could not dial transport: %w (%v)", err, time.Since(start)) + return nil, fmt.Errorf("DialStream: %w: %w (%v)", errDialTransport, err, time.Since(start)) } t.t = newTransport } @@ -258,7 +261,14 @@ func (p *transportPoolV3) withTransportV3(ctx context.Context, hostKey types.Pub p.mu.Unlock() // Execute function. - err = fn(ctx, t) + err = func() (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic (withTransportV3): %v", r) + } + }() + return fn(ctx, t) + }() // Decrement refcounter again and clean up pool. p.mu.Lock() @@ -623,7 +633,7 @@ type PriceTablePaymentFunc func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, e // RPCPriceTable calls the UpdatePriceTable RPC. 
func RPCPriceTable(ctx context.Context, t *transportV3, paymentFunc PriceTablePaymentFunc) (_ api.HostPriceTable, err error) { - defer wrapErr(&err, "PriceTable") + defer wrapErr(ctx, "PriceTable", &err) s, err := t.DialStream(ctx) if err != nil { @@ -660,7 +670,7 @@ func RPCPriceTable(ctx context.Context, t *transportV3, paymentFunc PriceTablePa // RPCAccountBalance calls the AccountBalance RPC. func RPCAccountBalance(ctx context.Context, t *transportV3, payment rhpv3.PaymentMethod, account rhpv3.Account, settingsID rhpv3.SettingsID) (bal types.Currency, err error) { - defer wrapErr(&err, "AccountBalance") + defer wrapErr(ctx, "AccountBalance", &err) s, err := t.DialStream(ctx) if err != nil { return types.ZeroCurrency, err @@ -685,7 +695,7 @@ func RPCAccountBalance(ctx context.Context, t *transportV3, payment rhpv3.Paymen // RPCFundAccount calls the FundAccount RPC. func RPCFundAccount(ctx context.Context, t *transportV3, payment rhpv3.PaymentMethod, account rhpv3.Account, settingsID rhpv3.SettingsID) (err error) { - defer wrapErr(&err, "FundAccount") + defer wrapErr(ctx, "FundAccount", &err) s, err := t.DialStream(ctx) if err != nil { return err @@ -712,7 +722,7 @@ func RPCFundAccount(ctx context.Context, t *transportV3, payment rhpv3.PaymentMe // fetching a pricetable using the fetched revision to pay for it. If // paymentFunc returns 'nil' as payment, the host is not paid. func RPCLatestRevision(ctx context.Context, t *transportV3, contractID types.FileContractID, paymentFunc func(rev *types.FileContractRevision) (rhpv3.HostPriceTable, rhpv3.PaymentMethod, error)) (_ types.FileContractRevision, err error) { - defer wrapErr(&err, "LatestRevision") + defer wrapErr(ctx, "LatestRevision", &err) s, err := t.DialStream(ctx) if err != nil { return types.FileContractRevision{}, err @@ -738,7 +748,7 @@ func RPCLatestRevision(ctx context.Context, t *transportV3, contractID types.Fil // RPCReadSector calls the ExecuteProgram RPC with a ReadSector instruction. 
func RPCReadSector(ctx context.Context, t *transportV3, w io.Writer, pt rhpv3.HostPriceTable, payment rhpv3.PaymentMethod, offset, length uint32, merkleRoot types.Hash256) (cost, refund types.Currency, err error) { - defer wrapErr(&err, "ReadSector") + defer wrapErr(ctx, "ReadSector", &err) s, err := t.DialStream(ctx) if err != nil { return types.ZeroCurrency, types.ZeroCurrency, err @@ -803,7 +813,7 @@ func RPCReadSector(ctx context.Context, t *transportV3, w io.Writer, pt rhpv3.Ho } func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte) (cost types.Currency, err error) { - defer wrapErr(&err, "AppendSector") + defer wrapErr(ctx, "AppendSector", &err) // sanity check revision first if rev.RevisionNumber == math.MaxUint64 { @@ -940,11 +950,12 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat return } -func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transportV3, pt *rhpv3.HostPriceTable, rev types.FileContractRevision, renterKey types.PrivateKey, l *zap.SugaredLogger) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { - defer wrapErr(&err, "RPCRenew") +func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transportV3, pt *rhpv3.HostPriceTable, rev types.FileContractRevision, renterKey types.PrivateKey, l *zap.SugaredLogger) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { + defer wrapErr(ctx, "RPCRenew", &err) + s, err := t.DialStream(ctx) if err != nil { - return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, fmt.Errorf("failed to dial stream: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to dial stream: %w", err) } defer s.Close() @@ -954,7 +965,7 @@ func RPCRenew(ctx context.Context, rrr 
api.RHPRenewRequest, bus Bus, t *transpor ptUID = pt.UID } if err = s.WriteRequest(rhpv3.RPCRenewContractID, &ptUID); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to send ptUID: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to send ptUID: %w", err) } // If we didn't have a valid pricetable, read the temporary one from the @@ -962,28 +973,28 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor if ptUID == (rhpv3.SettingsID{}) { var ptResp rhpv3.RPCUpdatePriceTableResponse if err = s.ReadResponse(&ptResp, defaultRPCResponseMaxSize); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to read RPCUpdatePriceTableResponse: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to read RPCUpdatePriceTableResponse: %w", err) } pt = new(rhpv3.HostPriceTable) if err = json.Unmarshal(ptResp.PriceTableJSON, pt); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to unmarshal price table: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to unmarshal price table: %w", err) } } // Perform gouging checks. 
gc, err := GougingCheckerFromContext(ctx, false) if err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to get gouging checker: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to get gouging checker: %w", err) } if breakdown := gc.Check(nil, pt); breakdown.Gouging() { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("host gouging during renew: %v", breakdown) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("host gouging during renew: %v", breakdown) } // Prepare the signed transaction that contains the final revision as well // as the new contract - wprr, err := bus.WalletPrepareRenew(ctx, rev, rrr.HostAddress, rrr.RenterAddress, renterKey, rrr.RenterFunds, rrr.MinNewCollateral, *pt, rrr.EndHeight, rrr.WindowSize, rrr.ExpectedNewStorage) + wprr, err := bus.WalletPrepareRenew(ctx, rev, rrr.HostAddress, rrr.RenterAddress, renterKey, rrr.RenterFunds, rrr.MinNewCollateral, rrr.MaxFundAmount, *pt, rrr.EndHeight, rrr.WindowSize, rrr.ExpectedNewStorage) if err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to prepare renew: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to prepare renew: %w", err) } // Starting from here, we need to make sure to release the txn on error. @@ -1006,13 +1017,13 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor FinalRevisionSignature: finalRevisionSignature, } if err = s.WriteResponse(&req); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to send RPCRenewContractRequest: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to send RPCRenewContractRequest: %w", err) } // Incorporate the host's additions. 
var hostAdditions rhpv3.RPCRenewContractHostAdditions if err = s.ReadResponse(&hostAdditions, defaultRPCResponseMaxSize); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to read RPCRenewContractHostAdditions: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to read RPCRenewContractHostAdditions: %w", err) } parents = append(parents, hostAdditions.Parents...) txn.SiacoinInputs = append(txn.SiacoinInputs, hostAdditions.SiacoinInputs...) @@ -1044,7 +1055,7 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor Signatures: []uint64{0, 1}, } if err := bus.WalletSign(ctx, &txn, wprr.ToSign, cf); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to sign transaction: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to sign transaction: %w", err) } // Create a new no-op revision and sign it. @@ -1068,13 +1079,13 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor RevisionSignature: renterNoOpRevisionSignature, } if err = s.WriteResponse(&rs); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to send RPCRenewSignatures: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to send RPCRenewSignatures: %w", err) } // Receive the host's signatures. var hostSigs rhpv3.RPCRenewSignatures if err = s.ReadResponse(&hostSigs, defaultRPCResponseMaxSize); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, fmt.Errorf("failed to read RPCRenewSignatures: %w", err) + return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to read RPCRenewSignatures: %w", err) } txn.Signatures = append(txn.Signatures, hostSigs.TransactionSignatures...) 
@@ -1085,7 +1096,7 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor return rhpv2.ContractRevision{ Revision: noOpRevision, Signatures: [2]types.TransactionSignature{renterNoOpRevisionSignature, hostSigs.RevisionSignature}, - }, txnSet, pt.ContractPrice, nil + }, txnSet, pt.ContractPrice, wprr.FundAmount, nil } // initialRevision returns the first revision of a file contract formation diff --git a/worker/s3/authentication.go b/worker/s3/authentication.go index 67017356b..58ebad677 100644 --- a/worker/s3/authentication.go +++ b/worker/s3/authentication.go @@ -24,6 +24,7 @@ type ( } permissions struct { + Authenticated bool ListBuckets bool ListBucket bool CreateBucket bool @@ -52,6 +53,7 @@ var ( // rootPerms are used for requests that were successfully authenticated // using v4 signatures. rootPerms = permissions{ + Authenticated: true, ListBuckets: true, ListBucket: true, CreateBucket: true, @@ -123,33 +125,56 @@ func (b *authenticatedBackend) reloadV4Keys(ctx context.Context) error { return nil } -func (b *authenticatedBackend) AuthenticationMiddleware(handler http.Handler) http.Handler { +func (b *authenticatedBackend) AuthenticationMiddleware(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, rq *http.Request) { + // start with no permissions perms := noAccessPerms - if rq.Header.Get("Authorization") == "" { - // No auth header, we continue without permissions. Request might - // still succeed due to bucket policy. - } else if err := b.reloadV4Keys(rq.Context()); err != nil { - writeResponse(w, signature.APIError{ - Code: string(gofakes3.ErrInternal), - Description: fmt.Sprintf("failed to reload v4 keys: %v", err), - HTTPStatusCode: http.StatusInternalServerError, - }) - return - } else if result := signature.V4SignVerify(rq); result != signature.ErrNone { - // Authentication attempted but failed. 
- writeResponse(w, signature.GetAPIError(result)) - return - } else { - // Authenticated request, treat as root user. + + if rq.Header.Get("Authorization") != "" { + // auth header found, refresh keys + if err := b.reloadV4Keys(rq.Context()); err != nil { + writeResponse(w, signature.APIError{ + Code: string(gofakes3.ErrInternal), + Description: fmt.Sprintf("failed to reload v4 keys: %v", err), + HTTPStatusCode: http.StatusInternalServerError, + }) + return + } + // verify signature + if _, result := signature.V4SignVerify(rq); result != signature.ErrNone { + // authentication attempted but failed. + writeResponse(w, signature.GetAPIError(result)) + return + } + // authenticated request successfully perms = rootPerms } - // Add permissions to context. - ctx := context.WithValue(rq.Context(), permissionKey, &perms) - handler.ServeHTTP(w, rq.WithContext(ctx)) + + // add permissions to context + h.ServeHTTP(w, rq.WithContext(context.WithValue(rq.Context(), permissionKey, &perms))) }) } +func (b *authenticatedBackend) IsAuthenticated(w http.ResponseWriter, rq *http.Request, bucket string) bool { + // fetch permissions from context + perms := rq.Context().Value(permissionKey).(*permissions) + + // if no authentication has happened so far and no bucket is specified, deny + // access + if !perms.Authenticated && bucket == "" { + writeResponse(w, signature.APIError{ + Code: string(gofakes3.ErrAccessDenied), + Description: "no authentication provided", + HTTPStatusCode: http.StatusForbidden, + }) + return false + } + + // apply bucket-specific policies + b.applyBucketPolicy(rq.Context(), bucket, perms) + return true +} + func (b *authenticatedBackend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, error) { if !b.permsFromCtx(ctx, "").ListBuckets { return nil, gofakes3.ErrAccessDenied diff --git a/worker/s3/s3.go b/worker/s3/s3.go index ee93edcf2..cc6021652 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -21,6 +21,7 @@ type gofakes3Logger struct { type Opts 
struct { AuthDisabled bool HostBucketEnabled bool + HostBucketBases []string } type Bus interface { @@ -82,6 +83,7 @@ func New(b Bus, w Worker, logger *zap.SugaredLogger, opts Opts) (http.Handler, e faker, err := gofakes3.New( backend, gofakes3.WithHostBucket(opts.HostBucketEnabled), + gofakes3.WithHostBucketBase(opts.HostBucketBases...), gofakes3.WithLogger(&gofakes3Logger{ l: namedLogger, }), diff --git a/worker/upload.go b/worker/upload.go index 4a97099bb..4b7595bc0 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -30,10 +30,11 @@ const ( ) var ( - errContractExpired = errors.New("contract expired") - errNoCandidateUploader = errors.New("no candidate uploader found") - errNotEnoughContracts = errors.New("not enough contracts to support requested redundancy") - errUploadInterrupted = errors.New("upload was interrupted") + errContractExpired = errors.New("contract expired") + errNoCandidateUploader = errors.New("no candidate uploader found") + errNotEnoughContracts = errors.New("not enough contracts to support requested redundancy") + errUploadInterrupted = errors.New("upload was interrupted") + errSectorUploadFinished = errors.New("sector upload already finished") ) type ( @@ -117,7 +118,7 @@ type ( root types.Hash256 ctx context.Context - cancel context.CancelFunc + cancel context.CancelCauseFunc mu sync.Mutex uploaded object.Sector @@ -415,7 +416,7 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // defer a function that finishes the upload defer func() { ctx, cancel := context.WithTimeout(mgr.shutdownCtx, time.Minute) - if err := mgr.os.FinishUpload(ctx, upload.id); err != nil { + if err := mgr.os.FinishUpload(ctx, upload.id); err != nil && !errors.Is(err, context.Canceled) { mgr.logger.Errorf("failed to mark upload %v as finished: %v", upload.id, err) } cancel() @@ -750,7 +751,7 @@ func (u *upload) newSlabUpload(ctx context.Context, shards [][]byte, uploaders [ wg.Add(1) go func(idx int) { // create the ctx - sCtx, 
sCancel := context.WithCancel(ctx) + sCtx, sCancel := context.WithCancelCause(ctx) // create the sector // NOTE: we are computing the sector root here and pass it all the @@ -1087,7 +1088,7 @@ func (s *sectorUpload) finish(sector object.Sector) { s.mu.Lock() defer s.mu.Unlock() - s.cancel() + s.cancel(errSectorUploadFinished) s.uploaded = sector s.data = nil } diff --git a/worker/upload_test.go b/worker/upload_test.go index b9cc05ba2..320c71736 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -498,20 +498,11 @@ func TestRefreshUploaders(t *testing.T) { ul := w.uploadManager cs := w.cs hm := w.hm + bh := uint64(1) - // create test data - data := frand.Bytes(128) - - // create upload params - params := testParameters(t.Name()) - opts := testOpts() - - // upload data + // refresh uploaders contracts := w.Contracts() - _, err := w.upload(context.Background(), params.bucket, t.Name(), bytes.NewReader(data), contracts, opts...) - if err != nil { - t.Fatal(err) - } + ul.refreshUploaders(contracts, bh) // assert we have the expected number of uploaders if len(ul.uploaders) != len(contracts) { @@ -530,12 +521,9 @@ func TestRefreshUploaders(t *testing.T) { // add a new host/contract hNew := w.AddHost() - // upload data + // refresh uploaders contracts = w.Contracts() - _, _, err = ul.Upload(context.Background(), bytes.NewReader(data), contracts, params, lockingPriorityUpload) - if err != nil { - t.Fatal(err) - } + ul.refreshUploaders(contracts, bh) // assert we added and renewed exactly one uploader var added, renewed int @@ -570,9 +558,10 @@ func TestRefreshUploaders(t *testing.T) { } } - // upload data again but now with a blockheight that should expire most uploaders - params.bh = c1.WindowEnd - ul.Upload(context.Background(), bytes.NewReader(data), contracts, params, lockingPriorityUpload) + // refresh uploaders, use blockheight that expires most uploaders + bh = c1.WindowEnd + contracts = w.Contracts() + ul.refreshUploaders(contracts, bh) // assert we 
only have one uploader left if len(ul.uploaders) != 1 { diff --git a/worker/uploader.go b/worker/uploader.go index 80bd2393b..4a9d9aa9a 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -21,7 +21,9 @@ const ( ) var ( - errUploaderStopped = errors.New("uploader was stopped") + errAcquireContractFailed = errors.New("failed to acquire contract lock") + errFetchRevisionFailed = errors.New("failed to fetch revision") + errUploaderStopped = errors.New("uploader was stopped") ) type ( @@ -116,9 +118,9 @@ outer: } // execute it - elapsed, err := u.execute(req) - - // the uploader's contract got renewed, requeue the request + start := time.Now() + duration, err := u.execute(req) + elapsed := time.Since(start) if errors.Is(err, errMaxRevisionReached) { if u.tryRefresh(req.sector.ctx) { u.enqueue(req) @@ -126,6 +128,18 @@ outer: } } + // track stats + success, failure, uploadEstimateMS, uploadSpeedBytesPerMS := handleSectorUpload(err, duration, elapsed, req.overdrive) + u.trackSectorUploadStats(uploadEstimateMS, uploadSpeedBytesPerMS) + u.trackConsecutiveFailures(success, failure) + + // debug log + if uploadEstimateMS > 0 && !success { + u.logger.Debugw("sector upload failure was penalised", "uploadError", err, "uploadDuration", duration, "totalDuration", elapsed, "overdrive", req.overdrive, "penalty", uploadEstimateMS, "hk", u.hk) + } else if uploadEstimateMS == 0 && err != nil && !utils.IsErr(err, errSectorUploadFinished) { + u.logger.Debugw("sector upload failure was ignored", "uploadError", err, "uploadDuration", duration, "totalDuration", elapsed, "overdrive", req.overdrive, "hk", u.hk) + } + // send the response select { case <-req.sector.ctx.Done(): @@ -134,16 +148,50 @@ outer: err: err, }: } + } + } +} - // track the error, ignore gracefully closed streams and canceled overdrives - canceledOverdrive := req.done() && req.overdrive && err != nil - if !canceledOverdrive && !isClosedStream(err) { - u.trackSectorUpload(err, elapsed) - } else { - 
u.logger.Debugw("not tracking sector upload metric", zap.Error(err)) - } +func handleSectorUpload(uploadErr error, uploadDuration, totalDuration time.Duration, overdrive bool) (success bool, failure bool, uploadEstimateMS float64, uploadSpeedBytesPerMS float64) { + // no-op cases + if utils.IsErr(uploadErr, errMaxRevisionReached) { + return false, false, 0, 0 + } else if utils.IsErr(uploadErr, context.Canceled) { + return false, false, 0, 0 + } + + // happy case, upload was successful + if uploadErr == nil { + ms := uploadDuration.Milliseconds() + if ms == 0 { + ms = 1 // avoid division by zero } + return true, false, float64(ms), float64(rhpv2.SectorSize / ms) + } + + // upload failed because we weren't able to create a payment, in this case + // we want to punish the host but only to ensure we stop using it, meaning + // we don't increment consecutive failures + if utils.IsErr(uploadErr, errFailedToCreatePayment) { + return false, false, float64(time.Hour.Milliseconds()), 0 } + + // upload failed because the sector was already uploaded by another host, in + // this case we want to punish the host for being too slow but only when we + // weren't overdriving or when it took too long to dial + if utils.IsErr(uploadErr, errSectorUploadFinished) { + slowDial := utils.IsErr(uploadErr, errDialTransport) && totalDuration > time.Second + slowLock := utils.IsErr(uploadErr, errAcquireContractFailed) && totalDuration > time.Second + slowFetchRev := utils.IsErr(uploadErr, errFetchRevisionFailed) && totalDuration > time.Second + if !overdrive || slowDial || slowLock || slowFetchRev { + failure = overdrive + uploadEstimateMS = float64(totalDuration.Milliseconds() * 10) + } + return false, failure, uploadEstimateMS, 0 + } + + // in all other cases we want to punish the host for failing the upload + return false, true, float64(time.Hour.Milliseconds()), 0 } func (u *uploader) Stop(err error) { @@ -198,17 +246,30 @@ func (u *uploader) estimate() float64 { return numSectors * 
estimateP90 } -func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { +// execute executes the sector upload request, if the upload was successful it +// returns the time it took to upload the sector to the host +func (u *uploader) execute(req *sectorUploadReq) (_ time.Duration, err error) { // grab fields u.mu.Lock() host := u.host fcid := u.fcid u.mu.Unlock() + // wrap cause + defer func() { + if cause := context.Cause(req.sector.ctx); cause != nil && !utils.IsErr(err, cause) { + if err != nil { + err = fmt.Errorf("%w; %w", cause, err) + } else { + err = cause + } + } + }() + // acquire contract lock lockID, err := u.cl.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration) if err != nil { - return 0, err + return 0, fmt.Errorf("%w; %w", errAcquireContractFailed, err) } // defer the release @@ -226,26 +287,24 @@ func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { // fetch the revision rev, err := host.FetchRevision(ctx, defaultRevisionFetchTimeout) if err != nil { - return 0, err + return 0, fmt.Errorf("%w; %w", errFetchRevisionFailed, err) } else if rev.RevisionNumber == math.MaxUint64 { return 0, errMaxRevisionReached } // update the bus if err := u.os.AddUploadingSector(ctx, req.uploadID, fcid, req.sector.root); err != nil { - return 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to add uploading sector to contract %v; %w", fcid, err) } // upload the sector start := time.Now() err = host.UploadSector(ctx, req.sector.root, req.sector.sectorData(), rev) if err != nil { - return 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to upload sector to contract %v; %w", fcid, err) } - // calculate elapsed time - elapsed := time.Since(start) - return elapsed, nil + return time.Since(start), nil } func (u *uploader) pop() *sectorUploadReq { @@ -268,21 +327,26 @@ func (u 
*uploader) signalWork() { } } -func (u *uploader) trackSectorUpload(err error, d time.Duration) { +func (u *uploader) trackConsecutiveFailures(success, failure bool) { u.mu.Lock() defer u.mu.Unlock() - if err != nil { - u.consecutiveFailures++ - u.statsSectorUploadEstimateInMS.Track(float64(time.Hour.Milliseconds())) - } else { - ms := d.Milliseconds() - if ms == 0 { - ms = 1 // avoid division by zero - } + if success { u.consecutiveFailures = 0 - u.statsSectorUploadEstimateInMS.Track(float64(ms)) // duration in ms - u.statsSectorUploadSpeedBytesPerMS.Track(float64(rhpv2.SectorSize / ms)) // bytes per ms + } else if failure { + u.consecutiveFailures++ + } +} + +func (u *uploader) trackSectorUploadStats(uploadEstimateMS, uploadSpeedBytesPerMS float64) { + u.mu.Lock() + defer u.mu.Unlock() + + if uploadEstimateMS > 0 { + u.statsSectorUploadEstimateInMS.Track(uploadEstimateMS) + } + if uploadSpeedBytesPerMS > 0 { + u.statsSectorUploadSpeedBytesPerMS.Track(uploadSpeedBytesPerMS) } } diff --git a/worker/uploader_test.go b/worker/uploader_test.go index b203827a5..46df5f584 100644 --- a/worker/uploader_test.go +++ b/worker/uploader_test.go @@ -3,8 +3,11 @@ package worker import ( "context" "errors" + "fmt" "testing" "time" + + rhpv2 "go.sia.tech/core/rhp/v2" ) func TestUploaderStopped(t *testing.T) { @@ -32,3 +35,66 @@ func TestUploaderStopped(t *testing.T) { t.Fatal("no response") } } + +func TestHandleSectorUpload(t *testing.T) { + ms := time.Millisecond + ss := float64(rhpv2.SectorSize) + overdrive := true + regular := false + + errHostError := errors.New("some host error") + errSectorUploadFinishedAndDial := fmt.Errorf("%w;%w", errDialTransport, errSectorUploadFinished) + + cases := []struct { + // input + uploadErr error + uploadDur time.Duration + totalDur time.Duration + overdrive bool + + // expected output + success bool + failure bool + uploadEstimateMS float64 + uploadSpeedBytesPerMS float64 + }{ + // happy case + {nil, ms, ms, regular, true, false, 1, ss}, + 
{nil, ms, ms, overdrive, true, false, 1, ss}, + + // renewed contract case + {errMaxRevisionReached, 0, ms, regular, false, false, 0, 0}, + {errMaxRevisionReached, 0, ms, overdrive, false, false, 0, 0}, + + // context canceled case + {context.Canceled, 0, ms, regular, false, false, 0, 0}, + {context.Canceled, 0, ms, overdrive, false, false, 0, 0}, + + // sector already uploaded case + {errSectorUploadFinished, ms, ms, regular, false, false, 10, 0}, + {errSectorUploadFinished, ms, ms, overdrive, false, false, 0, 0}, + {errSectorUploadFinishedAndDial, ms, ms, overdrive, false, false, 0, 0}, + {errSectorUploadFinishedAndDial, ms, 1001 * ms, overdrive, false, true, 10010, 0}, + + // payment failure case + {errFailedToCreatePayment, 0, ms, regular, false, false, 3600000, 0}, + {errFailedToCreatePayment, 0, ms, overdrive, false, false, 3600000, 0}, + + // host failure + {errHostError, ms, ms, regular, false, true, 3600000, 0}, + {errHostError, ms, ms, overdrive, false, true, 3600000, 0}, + } + + for i, c := range cases { + success, failure, uploadEstimateMS, uploadSpeedBytesPerMS := handleSectorUpload(c.uploadErr, c.uploadDur, c.totalDur, c.overdrive) + if success != c.success { + t.Fatalf("case %d failed: expected success %v, got %v", i+1, c.success, success) + } else if failure != c.failure { + t.Fatalf("case %d failed: expected failure %v, got %v", i+1, c.failure, failure) + } else if uploadEstimateMS != c.uploadEstimateMS { + t.Fatalf("case %d failed: expected uploadEstimateMS %v, got %v", i+1, c.uploadEstimateMS, uploadEstimateMS) + } else if uploadSpeedBytesPerMS != c.uploadSpeedBytesPerMS { + t.Fatalf("case %d failed: expected uploadSpeedBytesPerMS %v, got %v", i+1, c.uploadSpeedBytesPerMS, uploadSpeedBytesPerMS) + } + } +} diff --git a/worker/worker.go b/worker/worker.go index 60dfb475d..03ce9c874 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -8,7 +8,6 @@ import ( "io" "math" "math/big" - "net" "net/http" "os" "runtime" @@ -26,6 +25,7 @@ import ( 
"go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/internal/utils" + iworker "go.sia.tech/renterd/internal/worker" "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker/client" @@ -35,8 +35,8 @@ import ( ) const ( - batchSizeDeleteSectors = uint64(500000) // ~16MiB of roots - batchSizeFetchSectors = uint64(130000) // ~4MiB of roots + batchSizeDeleteSectors = uint64(1000) // 4GiB of contract data + batchSizeFetchSectors = uint64(25600) // 100GiB of contract data defaultLockTimeout = time.Minute defaultRevisionFetchTimeout = 30 * time.Second @@ -79,6 +79,7 @@ type ( HostStore ObjectStore SettingStore + WebhookStore Syncer Wallet @@ -151,10 +152,14 @@ type ( WalletDiscard(ctx context.Context, txn types.Transaction) error WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error) WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error) - WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) + WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error } + WebhookStore interface { + RegisterWebhook(ctx context.Context, 
webhook webhooks.Webhook) error + } + ConsensusState interface { ConsensusState(ctx context.Context) (api.ConsensusState, error) } @@ -208,6 +213,7 @@ type worker struct { uploadManager *uploadManager accounts *accounts + cache iworker.WorkerCache priceTables *priceTables transportPoolV3 *transportPoolV3 @@ -338,26 +344,18 @@ func (w *worker) fetchContracts(ctx context.Context, metadatas []api.ContractMet } func (w *worker) rhpPriceTableHandler(jc jape.Context) { - ctx := jc.Request.Context() - // decode the request var rptr api.RHPPriceTableRequest if jc.Decode(&rptr) != nil { return } - // apply timeout - if rptr.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(rptr.Timeout)) - defer cancel() - } - - // defer interaction recording + // defer interaction recording before applying timeout to make sure we still + // record the failed update if it timed out var err error var hpt api.HostPriceTable defer func() { - w.bus.RecordPriceTables(ctx, []api.HostPriceTableUpdate{ + w.bus.RecordPriceTables(jc.Request.Context(), []api.HostPriceTableUpdate{ { HostKey: rptr.HostKey, Success: isSuccessfulInteraction(err), @@ -367,6 +365,14 @@ func (w *worker) rhpPriceTableHandler(jc jape.Context) { }) }() + // apply timeout + ctx := jc.Request.Context() + if rptr.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(rptr.Timeout)) + defer cancel() + } + err = w.transportPoolV3.withTransportV3(ctx, rptr.HostKey, rptr.SiamuxAddr, func(ctx context.Context, t *transportV3) error { hpt, err = RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil }) return err @@ -641,10 +647,10 @@ func (w *worker) rhpRenewHandler(jc jape.Context) { // renew the contract var renewed rhpv2.ContractRevision var txnSet []types.Transaction - var contractPrice types.Currency + var contractPrice, fundAmount types.Currency if jc.Check("couldn't renew contract", 
w.withRevision(ctx, defaultRevisionFetchTimeout, rrr.ContractID, rrr.HostKey, rrr.SiamuxAddr, lockingPriorityRenew, func(_ types.FileContractRevision) (err error) { h := w.Host(rrr.HostKey, rrr.ContractID, rrr.SiamuxAddr) - renewed, txnSet, contractPrice, err = h.RenewContract(ctx, rrr) + renewed, txnSet, contractPrice, fundAmount, err = h.RenewContract(ctx, rrr) return err })) != nil { return @@ -661,6 +667,7 @@ func (w *worker) rhpRenewHandler(jc jape.Context) { ContractID: renewed.ID(), Contract: renewed, ContractPrice: contractPrice, + FundAmount: fundAmount, TransactionSet: txnSet, }) } @@ -1223,6 +1230,25 @@ func (w *worker) idHandlerGET(jc jape.Context) { jc.Encode(w.id) } +func (w *worker) eventsHandler(jc jape.Context) { + var event webhooks.Event + if jc.Decode(&event) != nil { + return + } else if event.Event == webhooks.WebhookEventPing { + jc.ResponseWriter.WriteHeader(http.StatusOK) + return + } + + err := w.cache.HandleEvent(event) + if errors.Is(err, api.ErrUnknownEvent) { + jc.ResponseWriter.WriteHeader(http.StatusAccepted) + return + } else if err != nil { + jc.Error(err, http.StatusBadRequest) + return + } +} + func (w *worker) memoryGET(jc jape.Context) { jc.Encode(api.MemoryResponse{ Download: w.downloadManager.mm.Status(), @@ -1275,19 +1301,21 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush } l = l.Named("worker").Named(id) - ctx, cancel := context.WithCancel(context.Background()) + cache := iworker.NewCache(b, l) + shutdownCtx, shutdownCancel := context.WithCancel(context.Background()) w := &worker{ alerts: alerts.WithOrigin(b, fmt.Sprintf("worker.%s", id)), allowPrivateIPs: allowPrivateIPs, contractLockingDuration: contractLockingDuration, + cache: cache, id: id, bus: b, masterKey: masterKey, logger: l.Sugar(), startTime: time.Now(), uploadingPackedSlabs: make(map[string]struct{}), - shutdownCtx: ctx, - shutdownCtxCancel: cancel, + shutdownCtx: shutdownCtx, + shutdownCtxCancel: shutdownCancel, } 
w.initAccounts(b) @@ -1307,6 +1335,8 @@ func (w *worker) Handler() http.Handler { "GET /account/:hostkey": w.accountHandlerGET, "GET /id": w.idHandlerGET, + "POST /events": w.eventsHandler, + "GET /memory": w.memoryGET, "GET /rhp/contracts": w.rhpContractsHandlerGET, @@ -1335,6 +1365,12 @@ func (w *worker) Handler() http.Handler { }) } +// Setup initializes the worker cache. +func (w *worker) Setup(ctx context.Context, apiURL, apiPassword string) error { + webhookOpts := []webhooks.HeaderOption{webhooks.WithBasicAuth("", apiPassword)} + return w.cache.Initialize(ctx, apiURL, webhookOpts...) +} + // Shutdown shuts down the worker. func (w *worker) Shutdown(ctx context.Context) error { // cancel shutdown context @@ -1351,42 +1387,22 @@ func (w *worker) Shutdown(ctx context.Context) error { func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey types.PublicKey, hostIP string) (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { logger := w.logger.With("host", hostKey).With("hostIP", hostIP).With("timeout", timeout) - // prepare a helper for scanning - scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { - // helper to prepare a context for scanning - withTimeoutCtx := func() (context.Context, context.CancelFunc) { - if timeout > 0 { - return context.WithTimeout(ctx, timeout) - } - return ctx, func() {} - } - // resolve the address - { - scanCtx, cancel := withTimeoutCtx() - defer cancel() - // resolve hostIP. We don't want to scan hosts on private networks. 
- if !w.allowPrivateIPs { - host, _, err := net.SplitHostPort(hostIP) - if err != nil { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err - } - addrs, err := (&net.Resolver{}).LookupIPAddr(scanCtx, host) - if err != nil { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err - } - for _, addr := range addrs { - if isPrivateIP(addr.IP) { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, api.ErrHostOnPrivateNetwork - } - } - } + + // prepare a helper to create a context for scanning + timeoutCtx := func() (context.Context, context.CancelFunc) { + if timeout > 0 { + return context.WithTimeout(ctx, timeout) } + return ctx, func() {} + } + // prepare a helper for scanning + scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { // fetch the host settings start := time.Now() var settings rhpv2.HostSettings { - scanCtx, cancel := withTimeoutCtx() + scanCtx, cancel := timeoutCtx() defer cancel() err := w.withTransportV2(scanCtx, hostKey, hostIP, func(t *rhpv2.Transport) error { var err error @@ -1406,7 +1422,7 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty // fetch the host pricetable var pt rhpv3.HostPriceTable { - scanCtx, cancel := withTimeoutCtx() + scanCtx, cancel := timeoutCtx() defer cancel() err := w.transportPoolV3.withTransportV3(scanCtx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error { if hpt, err := RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil }); err != nil { @@ -1423,6 +1439,16 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty return settings, pt, time.Since(start), nil } + // resolve host ip, don't scan if the host is on a private network or if it + // resolves to more than two addresses of the same type, if it fails for + // another reason the host scan won't have subnets + subnets, private, err := utils.ResolveHostIP(ctx, hostIP) + if 
errors.Is(err, api.ErrHostTooManyAddresses) { + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err + } else if private && !w.allowPrivateIPs { + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, api.ErrHostOnPrivateNetwork + } + // scan: first try settings, pt, duration, err := scan() if err != nil { @@ -1453,18 +1479,17 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty default: } - // record host scan - make sure this isn't interrupted by the same context - // used to time out the scan itself because otherwise we won't be able to - // record scans that timed out. - recordCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - scanErr := w.bus.RecordHostScans(recordCtx, []api.HostScan{ + // record host scan - make sure this is interrupted by the request ctx and + // not the context with the timeout used to time out the scan itself. + // Otherwise scans that time out won't be recorded. + scanErr := w.bus.RecordHostScans(ctx, []api.HostScan{ { HostKey: hostKey, + PriceTable: pt, + Subnets: subnets, Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), Settings: settings, - PriceTable: pt, + Timestamp: time.Now(), }, }) if scanErr != nil { @@ -1480,7 +1505,7 @@ func discardTxnOnErr(ctx context.Context, bus Bus, l *zap.SugaredLogger, txn typ ctx, cancel := context.WithTimeout(ctx, 10*time.Second) if dErr := bus.WalletDiscard(ctx, txn); dErr != nil { - l.Errorf("%w: %v, failed to discard txn: %v", *err, errContext, dErr) + l.Errorf("%v: %s, failed to discard txn: %v", *err, errContext, dErr) } cancel() } @@ -1554,13 +1579,13 @@ func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.Do opts.Range.Length = hor.Range.Length // fetch gouging params - gp, err := w.bus.GougingParams(ctx) + gp, err := w.cache.GougingParams(ctx) if err != nil { return nil, fmt.Errorf("couldn't fetch gouging parameters from bus: %w", err) } // fetch all contracts - contracts, err 
:= w.bus.Contracts(ctx, api.ContractsOpts{}) + contracts, err := w.cache.DownloadContracts(ctx) if err != nil { return nil, fmt.Errorf("couldn't fetch contracts from bus: %w", err) }