diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index 399a9bb..d75d78d 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -45,11 +45,6 @@ autolabeler: - label: "documentation" files: - "**/*.md" - - label: "enhancement" - files: - - "internal/**/*" - label: "maintenance" files: - ".github/**/*" - - "vendor/**/*" - - "testdata/**/*" diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000..e5d2892 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,14 @@ +name: ci + +on: + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version: '1.22.5' + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - run: go test ./... diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml deleted file mode 100644 index 25a2097..0000000 --- a/.github/workflows/workflow.yml +++ /dev/null @@ -1,63 +0,0 @@ -# This is a basic workflow to help you get started with Actions - -name: build - -# Controls when the action will run. Triggers the workflow on push or pull request -# events but only for the master branch -on: - push: - branches: - - main - - go_attempt - pull_request: - branches: - - main - - go_attempt -env: - GO_VERSION_TO_USE: '1.22.5' # The Go version to download (if necessary) and use. 
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - # The "build" workflow - build: - # The type of runner that the job will run on - runs-on: ubuntu-latest - - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v4 - - # Setup Go - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION_TO_USE }} - - # Install all the dependencies - - name: Install dependencies - run: | - go version - - # Run vet & lint on the code - - name: Run vet - run: | - go vet . - # Run build of the application - - name: Run build - run: go build . - # Run testing on the code - test: - # This job will run after the build job completes - needs: build - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION_TO_USE }} - - name: Checkout code - uses: actions/checkout@v4 - - name: Test - run: go test ./... 
- - name: Run testing - run: go test -v diff --git a/.gitignore b/.gitignore index 484c573..6f6f5e6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,22 @@ -.DS_Store -.vscode/ -main -tmp.yml \ No newline at end of file +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum diff --git a/Dockerfile b/Dockerfile index d0c27ad..cb430f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,22 +1,10 @@ -# Start from the latest golang base image FROM golang:1.22.5-alpine -# Set the Current Working Directory inside the container -WORKDIR /app -# Copy go mod and sum files + +WORKDIR /usr/src/app + COPY go.mod go.sum ./ -# Download all dependencies. Dependencies will be cached if the go.mod and go.sum files are not changed -RUN go mod download -RUN apk add -U curl jq -# Install doctl -RUN export DOCTL_VERSION="$(curl https://github.com/digitalocean/doctl/releases/latest -s -L -I -o /dev/null -w '%{url_effective}' | awk '{n=split($1,A,"/v"); print A[n]}')" && \ - curl -sL https://github.com/digitalocean/doctl/releases/download/v$DOCTL_VERSION/doctl-$DOCTL_VERSION-linux-amd64.tar.gz | tar -xz -C /usr/local/bin && \ - chmod +x /usr/local/bin/doctl +RUN go mod download && go mod verify -# Copy the source from the current directory to the Working Directory inside the container COPY . . 
-# Build the Go app -RUN go build -o app_action main.go -# Command to run the executable -RUN chmod +x app_action -# Run the app -ENTRYPOINT [ "/app/app_action" ] \ No newline at end of file +RUN go build -o /usr/local/bin/deploy ./deploy && \ + go build -o /usr/local/bin/delete ./delete diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..b4d310f --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 DigitalOcean, LLC. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md index 47bbba8..f79953d 100644 --- a/README.md +++ b/README.md @@ -1,98 +1,255 @@ # Deploy a [DigitalOcean App Platform](https://www.digitalocean.com/products/app-platform/) app using GitHub Actions. - - Auto-deploy your app from source on commit, while allowing you to run tests or perform other operations before. 
- - Auto-deploy your app from source and also update DockerHub / DigitalOcean Container Registry (DOCR) configuration in DigitalOcean [App Spec](https://docs.digitalocean.com/products/app-platform/reference/app-spec/) and deploy application with updated container image. - - -# Usage -### Deploy via GH Action and let DigitalOcean App Platform build and deploy your app. -- Get DigitalOcean Personal Access token by following this [instructions](https://docs.digitalocean.com/reference/api/create-personal-access-token/).**(skip this step if you already have DigitalOcean Personal Access Token)** -- Declare DigitalOcean Personal Access Token as DIGITALOCEAN_ACCESS_TOKEN variable in the [secrets](https://docs.github.com/en/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository) of github repository. -- [Create a GitHub Action workflow file](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions#create-an-example-workflow) and add this step below to it or add this to your existing action. - ```yaml - - name: DigitalOcean App Platform deployment - uses: digitalocean/app_action@v1.1.5 - with: - app_name: my_DO_app - token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} - ``` -- This step will trigger a deploy to your App on DigitalOcean App Platform - -### Deploy an one or more app components from a DigitalOcean Container Registry (DOCR) or DockerHub - -- Get DigitalOcean Personal Access token by following this [instructions](https://docs.digitalocean.com/reference/api/create-personal-access-token/)**(skip this step if you already have DigitalOcean Personal Access Token)** -- Declare DigitalOcean Personal Access Token as DIGITALOCEAN_ACCESS_TOKEN variable in the [secrets](https://docs.github.com/en/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository) of github repository. 
-- Add this step to update DigitalOcean Container Registry configuration of single or multiple [component]((https://www.digitalocean.com/blog/build-component-based-apps-with-digitalocean-app-platform/)) in app_spec - ```yaml - - name: DigitalOcean App Platform deployment - uses: digitalocean/app_action@v1.1.5 # replace this with current version from https://github.com/digitalocean/app_action/releases - with: - app_name: my_DO_app - token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} - images: '[ - { - "name": "sample-golang", - "image":{ - "registry_type": "DOCR", - "repository": "add_sample", - "tag": "a5cae3e" - }, - }, - { - "name": "sample-add", - "image":{ - "registry_type": "DOCKER_HUB", - "registry": "nginxdemos", - "repository": "hello", - "tag": "0.2" - }, - ]' - ``` -- DigitalOcean App Platform will now update your container image information in App Spec and then deploy your application. -- This step will trigger a DigitalOcean App Platform deployment of your app using the images specified. - -**Note: Always use unique tag names (i.e. `v1.1.15` instead of `latest` or `main`) to push image to the DigitalOcean Container Registry. This will allow you to deploy your application without delay. [ref](https://docs.digitalocean.com/products/container-registry/quickstart/)** - -# Inputs -- `app_name` - Name of the app on App Platform. -- `images` - (optional)List of json objects (of type ImageSourceSpec, see [Reference for App Specification](https://docs.digitalocean.com/products/app-platform/reference/app-spec/)) for providing information about name, registry type, repository, and tag of the image in . 
- ```json - [{ - "name": " ", #component name - "image":{ - "registry_type": "DOCKER_HUB", #Registry type, DOCR and DOCKER_HUB are supported - "registry": "nginxdemos", # DockerHub only, the registry name - "repository": "hello", # repository name - "tag": "0.2" # tag name - }, - }] - ``` - - `name` - name of the component in [App Spec](https://docs.digitalocean.com/products/app-platform/reference/app-spec/) - - `repository` - name of the DOCR repository with the following format- registry.digitalocean.com// - - `tag` - tag of the image provided while pushing to DOCR (by default latest tag is used). - **We suggest always use unique tag value)** -- `token` - doctl authentication token (generate token by following this [instructions](https://docs.digitalocean.com/reference/api/create-personal-access-token/) - -## Example: -Update DigitalOcean container image configuration of single component in App Spec [example](https://github.com/digitalocean/sample-golang-docr-github-action) - -DigitalOcean App Platform Auto-deploy with same app spec. [example](https://github.com/digitalocean/sample-golang-github-action) +Deploy an app from source (including the configuration) on commit, while allowing you to run tests or perform other operations as part of your CI/CD pipeline. + +- Supports picking up an in-repository (or filesystem really) `app.yaml` (defaults to `.do/app.yaml`, configurable via the `app_spec_location` input) to create the app from instead of having to rely on an already existing app that's then downloaded (though that is still supported). The in-filesystem app spec can also be templated with environment variables automatically (see examples below). +- Prints the build and deploy logs into the Github Action log on demand (configurable via `print_build_logs` and `print_deploy_logs`) and surfaces them as outputs `build_logs` and `deploy_logs`. +- Provides the app's metadata as the output `app`. 
+- Supports a "preview mode" geared towards orchestrating per-PR app previews. It can be enabled via `deploy_pr_review`, see the [Implementing Preview Apps](#launch-a-preview-app-per-pull-request) example. + +## Support + +If you require assistance or have a feature idea, please create a support ticket at the [official DigitalOcean Support](https://cloudsupport.digitalocean.com/s/). + +## Documentation + +### `deploy` action + +#### Inputs + +- `token`: DigitalOcean Personal Access Token. See https://docs.digitalocean.com/reference/api/create-personal-access-token/ for creating a new token. +- `app_spec_location`: Location of the app spec file. Defaults to `.do/app.yaml`. +- `app_name`: Name of the app to pull the spec from. The app must already exist. If an app name is given, a potential in-repository app spec is ignored. +- `print_build_logs`: Print build logs. Defaults to `false`. +- `print_deploy_logs`: Print deploy logs. Defaults to `false`. +- `deploy_pr_preview`: Deploy the app as a PR preview. The app name will be derived from the PR, the app spec will be modified to exclude conflicting configuration like domains and alerts and all Github references to the current repository will be updated to point to the PR's branch. Defaults to `false`. + +#### Outputs + +- `app`: A JSON representation of the entire app after the deployment. +- `build_logs`: The builds logs of the deployment. +- `deploy_logs`: The deploy logs of the deployment. + +### `delete` action + +#### Inputs + +- `token`: DigitalOcean Personal Access Token. See https://docs.digitalocean.com/reference/api/create-personal-access-token/ for creating a new token. +- `app_id`: ID of the app to delete. +- `app_name`: Name of the app to delete. +- `from_pr_preview`: Use this if the app was deployed as a PR preview. The app name will be derived from the PR and. +- `ignore_not_found`: Ignore if the app is not found. 
+ +## Usage + +As a prerequisite for all examples, you'll need a `DIGITALOCEAN_ACCESS_TOKEN`[secret](https://docs.github.com/en/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository) in the respective repository. If not already done, get a DigitalOcean Personal Access token by following this [instructions](https://docs.digitalocean.com/reference/api/create-personal-access-token/) and declare it as that secret in the repository you're working with. + +### Deploy an app + +With the following contents of `.do/app.yaml` in the repository: + +```yaml +name: sample +services: +- name: sample + github: + branch: main + repo: digitalocean/sample-nodejs +``` + +The following action deploys the app whenever a new commit is pushed to the main branch. Note that `deploy_on_push` is **not** used here, since the Github Action is the driving force behind the deployment. Also note that updates to `.do/app.yaml` will automatically be applied to the app. + +```yaml +name: Update App + +on: + push: + branches: [main] + +jobs: + deploy-app: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Deploy the app + uses: digitalocean/app_action/deploy@v2 + with: + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} +``` + +### Deploy an app with a prebuilt image + +With the following contents of `.do/app.yaml` in the repository: + +```yaml +name: sample +services: +- name: sample + image: + registry_type: GHCR + registry: YOUR_ORG + repository: YOUR_REPO + digest: ${SAMPLE_DIGEST} +``` + +The following action builds a new image from a Dockerfile in the repository and deploys the respective app from it. The build in App Platform is automatically bypassed. The built image is deployed from its digest, avoiding any inconsistencies around mutable tags and guaranteeing that **exactly** this image is deployed. 
+ +```yaml +name: Build, Push and Deploy a Docker Image + +on: + push: + branches: [main] + +jobs: + build-push-deploy-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Log in to the Container registry + uses: docker/login-action@v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push Docker image + id: push + uses: docker/build-push-action@v6.5.0 + with: + context: . + push: true + tags: ghcr.io/${{ github.repository }}:latest + - name: Deploy the app + uses: digitalocean/app_action/deploy@v2 + env: + SAMPLE_DIGEST: ${{ steps.push.outputs.digest }} + with: + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} +``` + +### Launch a preview app per pull request + +With the following contents of `.do/app.yaml` in the repository: + +```yaml +name: sample +services: +- name: sample + github: + branch: main + repo: digitalocean/sample-nodejs +``` + +The following 2 actions implement a "Preview Apps" feature, that provide a per-PR app to check if the deployment **would** work. If the deployment succeeds, a comment is posted with the live URL of the app. If the deployment fails, a link to the respective action run is posted alongside the build and deployment logs for quick debugging. + +Once the PR is closed or merged, the respective app is deleted again. 
+ +```yaml +name: App Platform Preview + +on: + pull_request: + branches: [main] + +permissions: + pull-requests: write + +jobs: + test: + name: preview + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Deploy the app + id: deploy + uses: digitalocean/app_action/deploy@v2 + with: + deploy_pr_preview: "true" + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} + - uses: actions/github-script@v7 + env: + BUILD_LOGS: ${{ steps.deploy.outputs.build_logs }} + DEPLOY_LOGS: ${{ steps.deploy.outputs.deploy_logs }} + with: + script: | + const { BUILD_LOGS, DEPLOY_LOGS } = process.env + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `:rocket: :rocket: :rocket: The app was successfully deployed at ${{ fromJson(steps.deploy.outputs.app).live_url }}.` + }) + - uses: actions/github-script@v7 + if: failure() + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `The app failed to be deployed. Logs can be found [here](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}). + + ## Logs +
+ Build logs + + \`\`\` + ${BUILD_LOGS} + \`\`\` +
+ +
+ Deploy logs + + \`\`\` + ${DEPLOY_LOGS} + \`\`\` +
` + }) +``` + +```yaml +name: Delete Preview + +on: + pull_request: + types: [ closed ] + +jobs: + closed: + runs-on: ubuntu-latest + steps: + - name: delete preview app + uses: digitalocean/app_action/delete@v2 + with: + from_pr_preview: "true" + ignore_not_found: "true" + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} +``` + +## Note for handling container images + +It is strongly suggested to use image digests to identify a specific image like in the example above. If that is not possible, it is strongly suggested to use a unique and descriptive tag for the respective image (not `latest`). + +## Upgrade from v1.x + +The v1 branch of this action is no longer under active development. Its documentation is [still available](https://github.com/digitalocean/app_action/blob/v1/README.md) though. + +The new deploy action does not support the `images` input from the old action. For in-repository app specs, it's suggested to use env-var-substitution as in the example above. If the spec of an existing app should be updated via the backwards-compatible `app_name` input, the `IMAGE_DIGEST_$component-name`/`IMAGE_TAG_$component-name` environment variables can be used to change the respective fields of the image reference. ## Resources to know more about DigitalOcean App Platform App Spec + - [App Platform Guided App Spec Declaration](https://www.digitalocean.com/community/tech_talks/defining-your-app-specification-on-digitalocean-app-platform) - [App Platform App Spec Blog](https://docs.digitalocean.com/products/app-platform/reference/app-spec/) - [App Platform App Spec Components](https://www.digitalocean.com/blog/build-component-based-apps-with-digitalocean-app-platform/) -## Note for handling DigitalOcean Container Registry images: -Because image manifests are cached in different regions, there may be a maximum delay of one hour between pushing to a tag that already exists in your registry and being able to pull the new image by tag. 
This may happen, for example, when using the :latest tag. To avoid the delay, use: -- Unique tags (other than :latest) -- SHA hash of Github commit -- SHA hash of the new manifest - -## Development - -- Install gomock with `go install github.com/golang/mock/mockgen@v1.6.0` -- `go generate ./...` to generate the mocks - ## License -This GitHub Action and associated scripts and documentation in this project are released under the [MIT License](LICENSE). + +This GitHub Action and associated scripts and documentation in this project are released under the [MIT License](LICENSE). \ No newline at end of file diff --git a/action.yml b/action.yml deleted file mode 100644 index ee2402f..0000000 --- a/action.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: DigitalOcean App Platform deployment -description: Deploy application to DigitalOcean's App Platform or Update DOCR image in the DigitalOcean's App Platform. -branding: - icon: 'upload-cloud' - color: 'blue' - -inputs: - app_name: - description: Name of the app. (The name of the app must be unique across all apps in the same account.) - required: true - token: - description: DigitalOcean Personal Access Token.(use https://docs.digitalocean.com/reference/api/create-personal-access-token/ for creating new token) - required: true - images: - description: (OPTIONAL)a json array of objects with the properties name (description), repository (repo url), tag (image tag) - required: false - default: "" - -runs: - using: 'docker' - image: 'Dockerfile' - args: - - "${{ inputs.images }}" - - "${{ inputs.app_name }}" - - "${{ inputs.token }}" \ No newline at end of file diff --git a/delete/action.yml b/delete/action.yml new file mode 100644 index 0000000..322b7b8 --- /dev/null +++ b/delete/action.yml @@ -0,0 +1,31 @@ +name: DigitalOcean App Platform app delete +description: Delete an application from DigitalOcean's App Platform. 
+branding: + icon: 'upload-cloud' + color: 'blue' + +inputs: + token: + description: DigitalOcean Personal Access Token. See https://docs.digitalocean.com/reference/api/create-personal-access-token/ for creating a new token. + required: true + app_id: + description: ID of the app to delete. + required: false + default: '' + app_name: + description: Name of the app to delete. + required: false + default: '' + from_pr_preview: + description: Use this if the app was deployed as a PR preview. The app name will be derived from the PR number. + required: false + default: 'false' + ignore_not_found: + description: Ignore if the app is not found. + required: false + default: 'false' + +runs: + using: docker + image: ../Dockerfile + args: ['delete'] diff --git a/delete/inputs.go b/delete/inputs.go new file mode 100644 index 0000000..17f4ef9 --- /dev/null +++ b/delete/inputs.go @@ -0,0 +1,32 @@ +package main + +import ( + "github.com/digitalocean/app_action/utils" + gha "github.com/sethvargo/go-githubactions" +) + +// inputs are the inputs for the action. +type inputs struct { + token string + appName string + appID string + fromPRPreview bool + ignoreNotFound bool +} + +// getInputs gets the inputs for the action. 
+func getInputs(a *gha.Action) (inputs, error) { + var in inputs + for _, err := range []error{ + utils.InputAsString(a, "token", true, &in.token), + utils.InputAsString(a, "app_name", false, &in.appName), + utils.InputAsString(a, "app_id", false, &in.appID), + utils.InputAsBool(a, "from_pr_preview", false, &in.fromPRPreview), + utils.InputAsBool(a, "ignore_not_found", false, &in.ignoreNotFound), + } { + if err != nil { + return in, err + } + } + return in, nil +} diff --git a/delete/main.go b/delete/main.go new file mode 100644 index 0000000..0799e3f --- /dev/null +++ b/delete/main.go @@ -0,0 +1,64 @@ +package main + +import ( + "context" + "net/http" + + "github.com/digitalocean/app_action/utils" + "github.com/digitalocean/godo" + gha "github.com/sethvargo/go-githubactions" +) + +func main() { + ctx := context.Background() + a := gha.New() + + in, err := getInputs(a) + if err != nil { + a.Fatalf("failed to get inputs: %v", err) + } + // Mask the DO token to avoid accidentally leaking it. 
+ a.AddMask(in.token) + + if in.appID == "" && in.appName == "" && !in.fromPRPreview { + a.Fatalf("either app_id, app_name, or from_pr_preview must be set") + } + + ghCtx, err := a.Context() + if err != nil { + a.Fatalf("failed to get GitHub context: %v", err) + } + + do := godo.NewFromToken(in.token) + do.UserAgent = "do-app-action-delete" + + appID := in.appID + if appID == "" { + appName := in.appName + if appName == "" { + repoOwner, repo := ghCtx.Repo() + appName = utils.GenerateAppName(repoOwner, repo, ghCtx.RefName) + } + + app, err := utils.FindAppByName(ctx, do.Apps, appName) + if err != nil { + a.Fatalf("failed to find app: %v", err) + } + if app == nil { + if in.ignoreNotFound { + a.Infof("app %q not found, ignoring", appName) + return + } + a.Fatalf("app %q not found", appName) + } + appID = app.ID + } + + if resp, err := do.Apps.Delete(ctx, appID); err != nil { + if resp.StatusCode == http.StatusNotFound && in.ignoreNotFound { + a.Infof("app %q not found, ignoring", appID) + return + } + a.Fatalf("failed to delete app: %v", err) + } +} diff --git a/deploy/action.yml b/deploy/action.yml new file mode 100644 index 0000000..6a83dd6 --- /dev/null +++ b/deploy/action.yml @@ -0,0 +1,43 @@ +name: DigitalOcean App Platform deployment +description: Deploy an application to DigitalOcean's App Platform. +branding: + icon: 'upload-cloud' + color: 'blue' + +inputs: + token: + description: DigitalOcean Personal Access Token. See https://docs.digitalocean.com/reference/api/create-personal-access-token/ for creating a new token. + required: true + app_spec_location: + description: Location of the app spec file. Mutually exclusive with `app_name`. + required: false + default: '.do/app.yaml' + app_name: + description: Name of the app to pull the spec from. The app must already exist. If an app name is given, a potential in-repository app spec is ignored. + required: false + default: '' + print_build_logs: + description: Print build logs. 
+ required: false + default: 'false' + print_deploy_logs: + description: Print deploy logs. + required: false + default: 'false' + deploy_pr_preview: + description: Deploy the app as a PR preview. The app name will be derived from the PR, the app spec will be mangled to exclude conflicting configuration like domains and alerts and all Github references to the current repository will be updated to point to the PR's branch. + required: false + default: 'false' + +outputs: + app: + description: A JSON representation of the entire app after the deployment. + build_logs: + description: The builds logs of the deployment. + deploy_logs: + description: The deploy logs of the deployment. + +runs: + using: docker + image: ../Dockerfile + args: ['deploy'] diff --git a/deploy/images.go b/deploy/images.go new file mode 100644 index 0000000..b41aae0 --- /dev/null +++ b/deploy/images.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "os" + "strings" + + "github.com/digitalocean/godo" +) + +// replaceImagesInSpec replaces the images in the given AppSpec with the ones defined in the environment. +func replaceImagesInSpec(spec *godo.AppSpec) error { + if err := godo.ForEachAppSpecComponent(spec, func(c godo.AppContainerComponentSpec) error { + image := c.GetImage() + if image == nil { + return nil + } + + if digest := os.Getenv("IMAGE_DIGEST_" + componentNameToEnvVar(c.GetName())); digest != "" { + image.Tag = "" + image.Digest = digest + } else if tag := os.Getenv("IMAGE_TAG_" + componentNameToEnvVar(c.GetName())); tag != "" { + image.Digest = "" + image.Tag = tag + } + return nil + }); err != nil { + return fmt.Errorf("failed to sanitize buildable components: %w", err) + } + return nil +} + +// componentNameToEnvVar converts a component name to an environment variable name. 
+func componentNameToEnvVar(name string) string { + return strings.ToUpper(strings.ReplaceAll(name, "-", "_")) +} diff --git a/deploy/images_test.go b/deploy/images_test.go new file mode 100644 index 0000000..a7637cf --- /dev/null +++ b/deploy/images_test.go @@ -0,0 +1,76 @@ +package main + +import ( + "testing" + + "github.com/digitalocean/godo" + "github.com/stretchr/testify/require" +) + +func TestReplaceImagesInSpec(t *testing.T) { + spec := &godo.AppSpec{ + Name: "foo", + Services: []*godo.AppServiceSpec{{ + Name: "web", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "latest", + }, + }}, + Workers: []*godo.AppWorkerSpec{{ + Name: "fancy-worker", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_DockerHub, + Registry: "foo", + Repository: "worker", + Tag: "latest", + }, + }}, + Jobs: []*godo.AppJobSpec{{ + Name: "job", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "main", + }, + }}, + } + + t.Setenv("IMAGE_TAG_WEB", "v1") + t.Setenv("IMAGE_DIGEST_FANCY_WORKER", "1234abcd") + t.Setenv("IMAGE_DIGEST_JOB", "1234abcd") + err := replaceImagesInSpec(spec) + require.NoError(t, err) + + expected := &godo.AppSpec{ + Name: "foo", + Services: []*godo.AppServiceSpec{{ + Name: "web", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "v1", // Tag was updated. + }, + }}, + Workers: []*godo.AppWorkerSpec{{ + Name: "fancy-worker", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_DockerHub, + Registry: "foo", + Repository: "worker", + Digest: "1234abcd", // Digest was updated, tag was removed. + }, + }}, + Jobs: []*godo.AppJobSpec{{ + Name: "job", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", // No change. 
+ Branch: "main", + }, + }}, + } + + require.Equal(t, expected, spec) +} diff --git a/deploy/inputs.go b/deploy/inputs.go new file mode 100644 index 0000000..b6f71f2 --- /dev/null +++ b/deploy/inputs.go @@ -0,0 +1,34 @@ +package main + +import ( + "github.com/digitalocean/app_action/utils" + gha "github.com/sethvargo/go-githubactions" +) + +// inputs are the inputs for the action. +type inputs struct { + token string + appSpecLocation string + appName string + printBuildLogs bool + printDeployLogs bool + deployPRPreview bool +} + +// getInputs gets the inputs for the action. +func getInputs(a *gha.Action) (inputs, error) { + var in inputs + for _, err := range []error{ + utils.InputAsString(a, "token", true, &in.token), + utils.InputAsString(a, "app_spec_location", false, &in.appSpecLocation), + utils.InputAsString(a, "app_name", false, &in.appName), + utils.InputAsBool(a, "print_build_logs", true, &in.printBuildLogs), + utils.InputAsBool(a, "print_deploy_logs", true, &in.printDeployLogs), + utils.InputAsBool(a, "deploy_pr_preview", true, &in.deployPRPreview), + } { + if err != nil { + return in, err + } + } + return in, nil +} diff --git a/deploy/main.go b/deploy/main.go new file mode 100644 index 0000000..3f3a00b --- /dev/null +++ b/deploy/main.go @@ -0,0 +1,285 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/digitalocean/app_action/utils" + "github.com/digitalocean/godo" + gha "github.com/sethvargo/go-githubactions" + "sigs.k8s.io/yaml" +) + +func main() { + ctx := context.Background() + a := gha.New() + + in, err := getInputs(a) + if err != nil { + a.Fatalf("failed to get inputs: %v", err) + } + // Mask the DO token to avoid accidentally leaking it. 
+ a.AddMask(in.token) + + do := godo.NewFromToken(in.token) + do.UserAgent = "do-app-action-deploy" + d := &deployer{ + action: a, + apps: do.Apps, + httpClient: http.DefaultClient, + inputs: in, + } + + spec, err := d.createSpec(ctx) + if err != nil { + a.Fatalf("failed to create spec: %v", err) + } + + if in.deployPRPreview { + ghCtx, err := a.Context() + if err != nil { + a.Fatalf("failed to get GitHub context: %v", err) + } + + // If this is a PR preview, we need to sanitize the spec. + if err := utils.SanitizeSpecForPullRequestPreview(spec, ghCtx); err != nil { + a.Fatalf("failed to sanitize spec for PR preview: %v", err) + } + } + + app, err := d.deploy(ctx, spec) + if app != nil { + // Surface a JSON representation of the app regardless of success or failure. + appJSON, err := json.Marshal(app) + if err != nil { + a.Errorf("failed to marshal app: %v", err) + } + a.SetOutput("app", string(appJSON)) + } + if err != nil { + a.Fatalf("failed to deploy: %v", err) + } + a.Infof("App is now live under URL: %s", app.GetLiveURL()) +} + +// deployer is responsible for deploying the app. +type deployer struct { + action *gha.Action + apps godo.AppsService + httpClient *http.Client + inputs inputs +} + +func (d *deployer) createSpec(ctx context.Context) (*godo.AppSpec, error) { + // First, fetch the app spec either from a pre-existing app or from the file system. 
+ var spec *godo.AppSpec + if d.inputs.appName != "" { + app, err := utils.FindAppByName(ctx, d.apps, d.inputs.appName) + if err != nil { + return nil, fmt.Errorf("failed to get app: %w", err) + } + if app == nil { + return nil, fmt.Errorf("app %q does not exist", d.inputs.appName) + } + spec = app.Spec + } else { + appSpec, err := os.ReadFile(d.inputs.appSpecLocation) + if err != nil { + return nil, fmt.Errorf("failed to get app spec content: %w", err) + } + appSpecExpanded := os.ExpandEnv(string(appSpec)) + if err := yaml.Unmarshal([]byte(appSpecExpanded), &spec); err != nil { + return nil, fmt.Errorf("failed to parse app spec: %w", err) + } + } + + if err := replaceImagesInSpec(spec); err != nil { + return nil, fmt.Errorf("failed to replace images in spec: %w", err) + } + return spec, nil +} + +// deploy deploys the app and waits for it to be live. +func (d *deployer) deploy(ctx context.Context, spec *godo.AppSpec) (*godo.App, error) { + // Either create or update the app. + app, err := utils.FindAppByName(ctx, d.apps, spec.GetName()) + if err != nil { + return nil, fmt.Errorf("failed to get app: %w", err) + } + if app == nil { + d.action.Infof("app %q does not exist yet, creating...", spec.Name) + app, _, err = d.apps.Create(ctx, &godo.AppCreateRequest{Spec: spec}) + if err != nil { + return nil, fmt.Errorf("failed to create app: %w", err) + } + } else { + d.action.Infof("app %q already exists, updating...", spec.Name) + app, _, err = d.apps.Update(ctx, app.GetID(), &godo.AppUpdateRequest{Spec: spec}) + if err != nil { + return nil, fmt.Errorf("failed to update app: %w", err) + } + } + + ds, _, err := d.apps.ListDeployments(ctx, app.GetID(), &godo.ListOptions{PerPage: 1}) + if err != nil { + return nil, fmt.Errorf("failed to list deployments: %w", err) + } + if len(ds) == 0 { + return nil, fmt.Errorf("expected a deployment right after creating/updating the app, but got none") + } + // The latest deployment is the deployment we just created. 
+ deploymentID := ds[0].GetID() + + d.action.Infof("wait for deployment to finish") + dep, err := d.waitForDeploymentTerminal(ctx, app.ID, deploymentID) + if err != nil { + return nil, fmt.Errorf("failed to wait deployment to finish: %w", err) + } + + buildLogs, err := d.getLogs(ctx, app.ID, deploymentID, godo.AppLogTypeBuild) + if err != nil { + return nil, fmt.Errorf("failed to get build logs: %w", err) + } + if len(buildLogs) > 0 { + d.action.SetOutput("build_logs", string(buildLogs)) + + if d.inputs.printBuildLogs { + d.action.Group("build logs") + d.action.Infof(string(buildLogs)) + d.action.EndGroup() + } + } + + deployLogs, err := d.getLogs(ctx, app.ID, deploymentID, godo.AppLogTypeDeploy) + if err != nil { + return nil, fmt.Errorf("failed to get deploy logs: %w", err) + } + if len(deployLogs) > 0 { + d.action.SetOutput("deploy_logs", string(deployLogs)) + + if d.inputs.printDeployLogs { + d.action.Group("deploy logs") + d.action.Infof(string(deployLogs)) + d.action.EndGroup() + } + } + + if dep.Phase != godo.DeploymentPhase_Active { + // Fetch the app to get the latest state before returning. + app, _, err := d.apps.Get(ctx, app.ID) + if err != nil { + return nil, fmt.Errorf("failed to get app after it failed: %w", err) + } + return app, fmt.Errorf("deployment failed in phase %q", dep.Phase) + } + + app, err = d.waitForAppLiveURL(ctx, app.ID) + if err != nil { + return nil, fmt.Errorf("failed to wait for app to have a live URL: %w", err) + } + + return app, nil +} + +// waitForDeploymentTerminal waits for the given deployment to be in a terminal state. 
+func (d *deployer) waitForDeploymentTerminal(ctx context.Context, appID, deploymentID string) (*godo.Deployment, error) { + t := time.NewTicker(2 * time.Second) + defer t.Stop() + + var dep *godo.Deployment + var currentPhase godo.DeploymentPhase + for { + var err error + dep, _, err = d.apps.GetDeployment(ctx, appID, deploymentID) + if err != nil { + return nil, fmt.Errorf("failed to get deployment: %w", err) + } + + if currentPhase != dep.GetPhase() { + d.action.Infof("deployment is in phase: %s", dep.GetPhase()) + currentPhase = dep.GetPhase() + } + + if isInTerminalPhase(dep) { + return dep, nil + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-t.C: + } + } +} + +// isInTerminalPhase returns whether or not the given deployment is in a terminal phase. +func isInTerminalPhase(d *godo.Deployment) bool { + switch d.GetPhase() { + case godo.DeploymentPhase_Active, godo.DeploymentPhase_Error, godo.DeploymentPhase_Canceled, godo.DeploymentPhase_Superseded: + return true + } + return false +} + +// waitForAppLiveURL waits for the given app to have a non-empty live URL. +func (d *deployer) waitForAppLiveURL(ctx context.Context, appID string) (*godo.App, error) { + t := time.NewTicker(2 * time.Second) + defer t.Stop() + + var a *godo.App + for { + var err error + a, _, err = d.apps.Get(ctx, appID) + if err != nil { + return nil, fmt.Errorf("failed to get deployment: %w", err) + } + + if a.GetLiveURL() != "" { + return a, nil + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-t.C: + } + } +} + +// getLogs retrieves the logs from the given historic URLs. +func (d *deployer) getLogs(ctx context.Context, appID, deploymentID string, logType godo.AppLogType) ([]byte, error) { + logsResp, resp, err := d.apps.GetLogs(ctx, appID, deploymentID, "", logType, true, -1) + if err != nil { + // Ignore if we get a 400, as this means the respective state was never reached or skipped. 
+ if resp.StatusCode == http.StatusBadRequest { + return nil, nil + } + + return nil, fmt.Errorf("failed to get %s logs: %w", logType, err) + } + + var buf bytes.Buffer + for _, historicURL := range logsResp.HistoricURLs { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, historicURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create log request: %w", err) + } + resp, err := d.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to get historic logs: %w", err) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read historic logs: %w", err) + } + buf.Write(body) + } + return buf.Bytes(), nil +} diff --git a/deploy/main_test.go b/deploy/main_test.go new file mode 100644 index 0000000..46c61ea --- /dev/null +++ b/deploy/main_test.go @@ -0,0 +1,543 @@ +package main + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "os" + "reflect" + "testing" + + "github.com/digitalocean/godo" + gha "github.com/sethvargo/go-githubactions" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "sigs.k8s.io/yaml" +) + +func TestCreateSpecFromFile(t *testing.T) { + spec := &godo.AppSpec{ + Name: "foo", + Services: []*godo.AppServiceSpec{{ + Name: "web", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "${ENV_VAR}", + }, + }, { + Name: "web2", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "latest", + }, + }}, + } + + bs, err := yaml.Marshal(spec) + if err != nil { + t.Fatalf("failed to marshal spec: %v", err) + } + specFilePath := t.TempDir() + "/spec.yaml" + if err := os.WriteFile(specFilePath, bs, 0644); err != nil { + t.Fatalf("failed to write spec file: %v", err) + } + + d := &deployer{ + inputs: inputs{appSpecLocation: specFilePath}, + } + + t.Setenv("ENV_VAR", 
"v1") // Put in via env substitution. + t.Setenv("IMAGE_TAG_WEB2", "v2") // Put in via "magic" env var. + got, err := d.createSpec(context.Background()) + if err != nil { + t.Fatalf("failed to create spec: %v", err) + } + + expected := &godo.AppSpec{ + Name: "foo", + Services: []*godo.AppServiceSpec{{ + Name: "web", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "v1", // Tag was updated. + }, + }, { + Name: "web2", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "v2", // Tag was updated. + }, + }}, + } + + if !reflect.DeepEqual(got, expected) { + t.Errorf("expected spec %+v, got %+v", expected, got) + } +} + +func TestCreateSpecFromExistingApp(t *testing.T) { + tests := []struct { + name string + appService *mockedAppsService + envs map[string]string + expected *godo.AppSpec + err bool + }{{ + name: "existing app", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", mock.Anything, mock.Anything).Return([]*godo.App{{ + Spec: &godo.AppSpec{ + Name: "foo", + Services: []*godo.AppServiceSpec{{ + Name: "web", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "latest", + }, + }}, + }, + }}, &godo.Response{}, nil) + return as + }(), + envs: map[string]string{"IMAGE_TAG_WEB": "v1"}, + expected: &godo.AppSpec{ + Name: "foo", + Services: []*godo.AppServiceSpec{{ + Name: "web", + Image: &godo.ImageSourceSpec{ + RegistryType: godo.ImageSourceSpecRegistryType_Ghcr, + Registry: "foo", + Repository: "bar", + Tag: "v1", // Tag was updated. 
+ }, + }}, + }, + }, { + name: "no app", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", mock.Anything, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + return as + }(), + err: true, + }, { + name: "error listing apps", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", mock.Anything, mock.Anything).Return([]*godo.App{}, &godo.Response{}, errors.New("an error")) + return as + }(), + err: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + d := &deployer{ + apps: test.appService, + inputs: inputs{appName: "foo"}, + } + + for k, v := range test.envs { + t.Setenv(k, v) + } + + spec, err := d.createSpec(context.Background()) + if err != nil && !test.err { + require.NoError(t, err) + } + if err == nil && test.err { + require.Error(t, err) + } + require.Equal(t, test.expected, spec) + }) + } +} + +func TestDeploy(t *testing.T) { + ctx := context.Background() + appID := "app-id" + deploymentID := "deployment-id" + spec := &godo.AppSpec{ + Name: "foo", + } + + tests := []struct { + name string + appService *mockedAppsService + logsRT *mockedRoundtripper + inputs inputs + expectedLogs []byte + expectedOutput []byte + err bool + }{{ + name: "success", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{ + Phase: godo.DeploymentPhase_Active, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeBuild, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://build.com"}, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, 
appID, deploymentID, "", godo.AppLogTypeDeploy, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://deploy.com"}, + }, &godo.Response{}, nil) + as.On("Get", ctx, appID).Return(&godo.App{ID: appID, LiveURL: "https://example.com"}, &godo.Response{}, nil) + return as + }(), + logsRT: func() *mockedRoundtripper { + rt := &mockedRoundtripper{} + rt.On("RoundTrip", mock.Anything).Return(&http.Response{ + Body: io.NopCloser(bytes.NewReader([]byte("build log"))), + }, nil).Once() + rt.On("RoundTrip", mock.Anything).Return(&http.Response{ + Body: io.NopCloser(bytes.NewReader([]byte("deploy log"))), + }, nil).Once() + return rt + }(), + inputs: inputs{ + printBuildLogs: true, + printDeployLogs: true, + }, + expectedLogs: []byte(`app "foo" does not exist yet, creating... +wait for deployment to finish +deployment is in phase: ACTIVE +::group::build logs +build log +::endgroup:: +::group::deploy logs +deploy log +::endgroup:: +`), + expectedOutput: []byte(`build_logs<<_GitHubActionsFileCommandDelimeter_ +build log +_GitHubActionsFileCommandDelimeter_ +deploy_logs<<_GitHubActionsFileCommandDelimeter_ +deploy log +_GitHubActionsFileCommandDelimeter_ +`), + }, { + name: "success on preexisting app", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{{ID: appID, Spec: spec}}, &godo.Response{}, nil) + as.On("Update", ctx, appID, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{ + Phase: godo.DeploymentPhase_Active, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeBuild, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://build.com"}, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", 
godo.AppLogTypeDeploy, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://deploy.com"}, + }, &godo.Response{}, nil) + as.On("Get", ctx, appID).Return(&godo.App{ID: appID, LiveURL: "https://example.com"}, &godo.Response{}, nil) + return as + }(), + logsRT: func() *mockedRoundtripper { + rt := &mockedRoundtripper{} + rt.On("RoundTrip", mock.Anything).Return(&http.Response{ + Body: io.NopCloser(bytes.NewReader([]byte("build log"))), + }, nil).Once() + rt.On("RoundTrip", mock.Anything).Return(&http.Response{ + Body: io.NopCloser(bytes.NewReader([]byte("deploy log"))), + }, nil).Once() + return rt + }(), + expectedLogs: []byte(`app "foo" already exists, updating... +wait for deployment to finish +deployment is in phase: ACTIVE +`), + expectedOutput: []byte(`build_logs<<_GitHubActionsFileCommandDelimeter_ +build log +_GitHubActionsFileCommandDelimeter_ +deploy_logs<<_GitHubActionsFileCommandDelimeter_ +deploy log +_GitHubActionsFileCommandDelimeter_ +`), + }, { + name: "fails to deploy", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{{ID: appID, Spec: spec}}, &godo.Response{}, nil) + as.On("Update", ctx, appID, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{ + Phase: godo.DeploymentPhase_Error, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeBuild, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://build.com"}, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeDeploy, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://deploy.com"}, + }, &godo.Response{}, nil) + as.On("Get", ctx, appID).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + return 
as + }(), + logsRT: func() *mockedRoundtripper { + rt := &mockedRoundtripper{} + rt.On("RoundTrip", mock.Anything).Return(&http.Response{ + Body: io.NopCloser(bytes.NewReader([]byte("build log"))), + }, nil).Once() + rt.On("RoundTrip", mock.Anything).Return(&http.Response{ + Body: io.NopCloser(bytes.NewReader([]byte("deploy log"))), + }, nil).Once() + return rt + }(), + err: true, + expectedLogs: []byte(`app "foo" already exists, updating... +wait for deployment to finish +deployment is in phase: ERROR +`), + expectedOutput: []byte(`build_logs<<_GitHubActionsFileCommandDelimeter_ +build log +_GitHubActionsFileCommandDelimeter_ +deploy_logs<<_GitHubActionsFileCommandDelimeter_ +deploy log +_GitHubActionsFileCommandDelimeter_ +`), + }, { + name: "fails to list apps", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, errors.New("an error")) + return as + }(), + err: true, + }, { + name: "fails to create app", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, errors.New("an error")) + return as + }(), + err: true, + expectedLogs: []byte(`app "foo" does not exist yet, creating... +`), + }, { + name: "fails to list deployments", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{}, &godo.Response{}, errors.New("an error")) + return as + }(), + err: true, + expectedLogs: []byte(`app "foo" does not exist yet, creating... 
+`), + }, { + name: "returns an empty deployment list", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{}, &godo.Response{}, nil) + return as + }(), + err: true, + expectedLogs: []byte(`app "foo" does not exist yet, creating... +`), + }, { + name: "fails to get deployment for phase poll", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{}, &godo.Response{}, errors.New("an error")) + return as + }(), + err: true, + expectedLogs: []byte(`app "foo" does not exist yet, creating... 
+wait for deployment to finish +`), + }, { + name: "fails to get get logs", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{ + Phase: godo.DeploymentPhase_Active, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeBuild, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://build.com"}, + }, &godo.Response{Response: &http.Response{StatusCode: http.StatusBadGateway}}, errors.New("an error")) + return as + }(), + err: true, + expectedLogs: []byte(`app "foo" does not exist yet, creating... +wait for deployment to finish +deployment is in phase: ACTIVE +`), + }, { + name: "ignores log failures for 400 returns", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{ + Phase: godo.DeploymentPhase_Active, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeBuild, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://build.com"}, + }, &godo.Response{Response: &http.Response{StatusCode: http.StatusBadRequest}}, errors.New("an error")) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeDeploy, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://deploy.com"}, + }, 
&godo.Response{Response: &http.Response{StatusCode: http.StatusBadRequest}}, errors.New("an error")) + as.On("Get", ctx, appID).Return(&godo.App{ID: appID, LiveURL: "https://example.com"}, &godo.Response{}, nil) + return as + }(), + expectedLogs: []byte(`app "foo" does not exist yet, creating... +wait for deployment to finish +deployment is in phase: ACTIVE +`), + }, { + name: "fails to get app for live URL poll", + appService: func() *mockedAppsService { + as := &mockedAppsService{} + as.On("List", ctx, mock.Anything).Return([]*godo.App{}, &godo.Response{}, nil) + as.On("Create", ctx, mock.Anything).Return(&godo.App{ID: appID}, &godo.Response{}, nil) + as.On("ListDeployments", ctx, appID, mock.Anything).Return([]*godo.Deployment{{ + ID: deploymentID, + }}, &godo.Response{}, nil) + as.On("GetDeployment", ctx, appID, deploymentID).Return(&godo.Deployment{ + Phase: godo.DeploymentPhase_Active, + }, &godo.Response{}, nil) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeBuild, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://build.com"}, + }, &godo.Response{Response: &http.Response{StatusCode: http.StatusBadRequest}}, errors.New("an error")) + as.On("GetLogs", ctx, appID, deploymentID, "", godo.AppLogTypeDeploy, true, -1).Return(&godo.AppLogs{ + HistoricURLs: []string{"http://deploy.com"}, + }, &godo.Response{Response: &http.Response{StatusCode: http.StatusBadRequest}}, errors.New("an error")) + as.On("Get", ctx, appID).Return(&godo.App{ID: appID, LiveURL: "https://example.com"}, &godo.Response{}, errors.New("an error")) + return as + }(), + err: true, + expectedLogs: []byte(`app "foo" does not exist yet, creating... 
+wait for deployment to finish +deployment is in phase: ACTIVE +`), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var actionLogs bytes.Buffer + outputFilePath := t.TempDir() + "/output" + d := &deployer{ + action: gha.New(gha.WithWriter(&actionLogs), gha.WithGetenv(func(k string) string { + switch k { + case "GITHUB_OUTPUT": + return outputFilePath + default: + return "" + } + })), + apps: test.appService, + httpClient: &http.Client{Transport: test.logsRT}, + inputs: test.inputs, + } + _, err := d.deploy(ctx, spec) + if err != nil && !test.err { + t.Fatalf("unexpected error: %v", err) + } + if err == nil && test.err { + t.Fatalf("expected an error") + } + + require.Equal(t, test.expectedLogs, actionLogs.Bytes()) + + output, err := os.ReadFile(outputFilePath) + if test.expectedOutput == nil { + require.ErrorIs(t, err, os.ErrNotExist) + } else { + require.NoError(t, err) + require.Equal(t, test.expectedOutput, output) + } + + test.appService.AssertExpectations(t) + }) + } +} + +type mockedRoundtripper struct { + mock.Mock +} + +func (m *mockedRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) { + args := m.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} + +type mockedAppsService struct { + mock.Mock + godo.AppsService +} + +func (m *mockedAppsService) Get(ctx context.Context, appID string) (*godo.App, *godo.Response, error) { + args := m.Called(ctx, appID) + return args.Get(0).(*godo.App), args.Get(1).(*godo.Response), args.Error(2) +} + +func (m *mockedAppsService) Create(ctx context.Context, req *godo.AppCreateRequest) (*godo.App, *godo.Response, error) { + args := m.Called(ctx, req) + return args.Get(0).(*godo.App), args.Get(1).(*godo.Response), args.Error(2) +} + +func (m *mockedAppsService) Update(ctx context.Context, name string, req *godo.AppUpdateRequest) (*godo.App, *godo.Response, error) { + args := m.Called(ctx, name, req) + return args.Get(0).(*godo.App), args.Get(1).(*godo.Response), 
args.Error(2) +} + +func (m *mockedAppsService) List(ctx context.Context, opt *godo.ListOptions) ([]*godo.App, *godo.Response, error) { + args := m.Called(ctx, opt) + return args.Get(0).([]*godo.App), args.Get(1).(*godo.Response), args.Error(2) +} + +func (m *mockedAppsService) GetDeployment(ctx context.Context, appID string, deploymentID string) (*godo.Deployment, *godo.Response, error) { + args := m.Called(ctx, appID, deploymentID) + return args.Get(0).(*godo.Deployment), args.Get(1).(*godo.Response), args.Error(2) +} + +func (m *mockedAppsService) ListDeployments(ctx context.Context, appID string, opt *godo.ListOptions) ([]*godo.Deployment, *godo.Response, error) { + args := m.Called(ctx, appID, opt) + return args.Get(0).([]*godo.Deployment), args.Get(1).(*godo.Response), args.Error(2) +} + +func (m *mockedAppsService) GetLogs(ctx context.Context, appID, deploymentID, component string, logType godo.AppLogType, follow bool, tailLines int) (*godo.AppLogs, *godo.Response, error) { + args := m.Called(ctx, appID, deploymentID, component, logType, follow, tailLines) + return args.Get(0).(*godo.AppLogs), args.Get(1).(*godo.Response), args.Error(2) +} diff --git a/go.mod b/go.mod index 59c1b39..459800a 100644 --- a/go.mod +++ b/go.mod @@ -4,17 +4,20 @@ go 1.22.5 require ( github.com/digitalocean/godo v1.119.0 - github.com/golang/mock v1.6.0 - github.com/pkg/errors v0.9.1 - gopkg.in/yaml.v2 v2.4.0 + github.com/sethvargo/go-githubactions v1.2.0 + github.com/stretchr/testify v1.9.0 sigs.k8s.io/yaml v1.4.0 ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/time v0.5.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) 
diff --git a/go.sum b/go.sum index 71313da..1b022c0 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,6 @@ github.com/digitalocean/godo v1.119.0 h1:dmFNQwSIAcH3z+FVovHLkazKDC2uA8oOlGvg5+H github.com/digitalocean/godo v1.119.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -21,46 +19,23 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +github.com/sethvargo/go-githubactions v1.2.0 h1:Gbr36trCAj6uq7Rx1DolY1NTIg0wnzw3/N5WHdKIjME= +github.com/sethvargo/go-githubactions v1.2.0/go.mod h1:7/4WeHgYfSz9U5vwuToCK9KPnELVHAhGtRwLREOQV80= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.22.0 
h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/internal/doctl/doctl.go b/internal/doctl/doctl.go deleted file mode 100644 index cec2ab4..0000000 
--- a/internal/doctl/doctl.go +++ /dev/null @@ -1,176 +0,0 @@ -package doctl - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/digitalocean/app_action/internal/parser" - "github.com/digitalocean/app_action/internal/parser_struct" - "github.com/digitalocean/godo" - "github.com/pkg/errors" -) - -// Client is a struct for holding doctl dependent function interface -type Client struct { -} - -// NewClient doctl client wrapper -func NewClient(token string) (Client, error) { - val, err := exec.Command("sh", "-c", fmt.Sprintf("doctl auth init --access-token %s", token)).Output() - if err != nil { - return Client{}, fmt.Errorf("unable to authenticate user: %s", val) - } - - d := Client{} - - return d, nil -} - -// ListDeployments takes appID as input and returns list of deployments (used to retrieve most recent deployment) -func (d *Client) ListDeployments(appID string) ([]godo.Deployment, error) { - cmd := exec.Command("sh", "-c", fmt.Sprintf("doctl apps list-deployments %s -ojson", appID)) - spec, err := cmd.Output() - if err != nil { - return nil, errors.Wrap(err, "error in retrieving list of deployments") - } - - // parsing incoming data to get all deployments - deployments, err := parser.ParseDeploymentSpec(spec) - if err != nil { - return nil, err - } - return deployments, nil -} - -// RetrieveActiveDeploymentID takes appID as input and retrieves currently deployment id of the active deployment of the app on App Platform -func (d *Client) RetrieveActiveDeploymentID(appID string) (string, error) { - cmd := exec.Command("sh", "-c", fmt.Sprintf("doctl apps get --format ActiveDeployment.ID --no-header %s", appID)) - deployID, err := cmd.Output() - if err != nil { - return "", errors.Wrap(err, "unable to retrieve active deployment") - } - deploymentID := strings.TrimSpace(string(deployID)) - return deploymentID, nil -} - -// RetrieveActiveDeployment takes active deployment id as input from(RetrieveActiveDeploymentID) and app id -// returns the app spec from App 
Platform as *godo.AppSpec, retrieves parsed json object of the json input -func (d *Client) RetrieveActiveDeployment(deploymentID string, appID string, input string) ([]parser_struct.UpdatedRepo, *godo.AppSpec, error) { - cmd := exec.Command("sh", "-c", fmt.Sprintf("doctl apps get-deployment %s %s -ojson", appID, string(deploymentID))) - apps, err := cmd.Output() - if err != nil { - return nil, nil, errors.Wrap(err, "error in retrieving currently deployed app id") - } - - //parse json input - allRepos, err := parser.ParseJsonInput(input) - if err != nil { - return nil, nil, err - } - - //parse deployment spec - deployments, err := parser.ParseDeploymentSpec(apps) - if err != nil { - return nil, nil, err - } - return allRepos, deployments[0].Spec, nil -} - -// UpdateAppPlatformAppSpec takes appID as input -// updates App Platform's app spec and creates deployment -func (d *Client) UpdateAppPlatformAppSpec(tmpfile, appID string) error { - cmd := exec.Command("sh", "-c", fmt.Sprintf("doctl apps update %s --spec %s", appID, tmpfile)) - _, err := cmd.Output() - if err != nil { - fmt.Printf("doctl apps update %s --spec %s", appID, tmpfile) - return errors.Wrap(err, "unable to update app") - } - return nil -} - -// CreateDeployments takes app id as an input and creates deployment for the app -func (d *Client) CreateDeployments(appID string) error { - cmd := exec.Command("sh", "-c", fmt.Sprintf("doctl apps create-deployment %s", appID)) - _, err := cmd.Output() - if err != nil { - return errors.Wrap(err, "unable to create-deployment for app") - } - return nil -} - -// RetrieveFromDigitalocean returns the app from DigitalOcean as a slice of byte -func (d *Client) RetrieveFromDigitalocean() ([]godo.App, error) { - cmd := exec.Command("sh", "-c", "doctl apps list -ojson") - apps, err := cmd.Output() - if err != nil { - return nil, errors.Wrap(err, "unable to get user app data from digitalocean") - } - // parsing incoming data for AppId - arr, err := parser.ParseAppSpec(apps) 
- if err != nil { - return nil, err - } - return arr, nil -} - -// RetrieveAppID takes unique app name as an input and retrieves app id from app platform based on the users unique app name -func (d *Client) RetrieveAppID(appName string) (string, error) { - arr, err := d.RetrieveFromDigitalocean() - if err != nil { - return "", err - } - //retrieve app id app array - var appID string - for k := range arr { - if arr[k].Spec.Name == appName { - appID = arr[k].ID - break - } - } - if appID == "" { - return "", errors.Wrap(err, "app not found") - } - return appID, nil -} - -// IsDeployed takes app id as an input and checks for the status of the deployment until the status is updated to ACTIVE or failed -func (d *Client) IsDeployed(appID string) error { - done := false - fmt.Println("App Platform is Building ....") - for !done { - app, err := d.ListDeployments(appID) - if err != nil { - return errors.Wrap(err, "error in retrieving list of deployments") - } - if app[0].Phase == "ACTIVE" { - fmt.Println("Build successful") - return nil - } - if app[0].Phase == "Failed" { - fmt.Println("Build unsuccessful") - return errors.Wrap(err, "build unsuccessful") - } - } - return nil -} - -// Deploy redeploys app if user provides empty json file -func (d *Client) Deploy(input string, appName string) error { - if strings.TrimSpace(string(input)) == "" { - appID, err := d.RetrieveAppID(appName) - if err != nil { - return err - } - err = d.CreateDeployments(appID) - if err != nil { - return err - } - err = d.IsDeployed(appID) - if err != nil { - return err - } - return nil - } - return errors.Errorf("Please provide valid json input") -} diff --git a/internal/parser/parser.go b/internal/parser/parser.go deleted file mode 100644 index 0a9afff..0000000 --- a/internal/parser/parser.go +++ /dev/null @@ -1,52 +0,0 @@ -package parser - -import ( - "encoding/json" - "log" - - parser_struct "github.com/digitalocean/app_action/internal/parser_struct" - "github.com/digitalocean/godo" - 
"github.com/pkg/errors" - "sigs.k8s.io/yaml" -) - -//ParseAppSpecToYaml parses updated json file to yaml -func ParseAppSpecToYaml(appSpec *godo.AppSpec) ([]byte, error) { - newYaml, err := yaml.Marshal(appSpec) - if err != nil { - return nil, errors.Wrap(err, "Error in building yaml") - } - return newYaml, nil -} - -// ParseDeploymentSpec parses deployment array and retrieves appSpec of recent deployment -func ParseDeploymentSpec(apps []byte) ([]godo.Deployment, error) { - var app []godo.Deployment - err := json.Unmarshal(apps, &app) - if err != nil { - log.Fatal("Error in retrieving app spec: ", err) - } - return app, nil -} - -// ParseAppSpec parses appSpec and returns array of apps -func ParseAppSpec(apps []byte) ([]godo.App, error) { - var arr []godo.App - err := json.Unmarshal(apps, &arr) - if err != nil { - return nil, errors.Wrap(err, "error in parsing data for AppId") - } - return arr, nil -} - -// ParseJsonInput takes the array of json object as input and unique name of users app as appName -//it parses the input and returns UpdatedRepo of the input -func ParseJsonInput(input string) ([]parser_struct.UpdatedRepo, error) { - //takes care of empty json Deployment (use case where we redeploy using same app spec) - var allRepos []parser_struct.UpdatedRepo - err := json.Unmarshal([]byte(input), &allRepos) - if err != nil { - return nil, errors.Wrap(err, "error in parsing json data from file") - } - return allRepos, nil -} diff --git a/internal/parser_struct/parser_struct.go b/internal/parser_struct/parser_struct.go deleted file mode 100644 index 273178b..0000000 --- a/internal/parser_struct/parser_struct.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser_struct - -import "github.com/digitalocean/godo" - -// UpdatedRepo used for parsing json object of changed repo -type UpdatedRepo struct { - // Name is the App Component Name. - Name string `json:"name,omitempty"` - // Repo is the Repository to be deployed. - // Deprecated: Use Image instead. 
- Repository string `json:"repository,omitempty"` - // Tag is the image tag to be deployed. - // Deprecated: Use Image instead. - Tag string `json:"tag,omitempty"` - // Image is the ImageSourceSpec to apply to the component. - Image godo.ImageSourceSpec `json:"image,omitempty"` -} diff --git a/main.go b/main.go deleted file mode 100755 index 018d44c..0000000 --- a/main.go +++ /dev/null @@ -1,311 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/digitalocean/app_action/internal/doctl" - "github.com/digitalocean/app_action/internal/parser" - parser_struct "github.com/digitalocean/app_action/internal/parser_struct" - "github.com/digitalocean/godo" - "github.com/pkg/errors" -) - -// AllError is used for handling errors -type AllError struct { - name string - notFound []string -} - -//go:generate mockgen -package main -source=main.go -self_package main -destination mock.go DoctlClient - -//DoctlClient interface for doctl functions -type DoctlClient interface { - ListDeployments(appID string) ([]godo.Deployment, error) - RetrieveActiveDeploymentID(appID string) (string, error) - RetrieveActiveDeployment(deploymentID string, appID string, input string) ([]parser_struct.UpdatedRepo, *godo.AppSpec, error) - UpdateAppPlatformAppSpec(tmpfile, appID string) error - CreateDeployments(appID string) error - RetrieveFromDigitalocean() ([]godo.App, error) - RetrieveAppID(appName string) (string, error) - IsDeployed(appID string) error - Deploy(input string, appName string) error -} - -type action struct { - appName string - images string - authToken string - client DoctlClient -} - -func main() { - //declaring variables for command line arguments input - appName := os.Args[2] - images := os.Args[1] - authToken := os.Args[3] - - //check for authentication token - if strings.TrimSpace(authToken) == "" { - log.Fatal("No auth token provided") - } - - //check for app name - if strings.TrimSpace(appName) == "" { - log.Fatal("No app name 
provided") - } - - d, err := doctl.NewClient(authToken) - if err != nil { - log.Fatal(err) - } - - a := &action{ - appName: appName, - images: images, - authToken: authToken, - client: &d, - } - - err = a.run() - if err != nil { - log.Fatal(err) - } -} - -//run contains business logic of app_action -func (a *action) run() error { - //redeploying app with the same app spec - if strings.TrimSpace(a.images) == "" { - err := a.client.Deploy(a.images, a.appName) - if err != nil { - return errors.Wrap(err, "triggering deploy") - } - return nil - } - - //retrieve appID from users deployment - appID, err := a.client.RetrieveAppID(a.appName) - if err != nil { - return errors.Wrap(err, "retrieving appID") - } - - //retrieve deployment id of active deployment - deploymentID, err := a.client.RetrieveActiveDeploymentID(appID) - if err != nil { - return errors.Wrap(err, "retrieving active deployment id") - } - - //retrieve apps from deployment id - input, apps, err := a.client.RetrieveActiveDeployment(deploymentID, appID, a.images) - if err != nil { - return errors.Wrap(err, "retrieving active deployment") - } - - //updates local app spec based on user input - tmpfile, err := a.updateLocalAppSpec(input, apps) - if err != nil { - return errors.Wrap(err, "updating local app spec") - } - - // cleanup app spec file if exists after run - defer func() { - if _, err := os.Stat(tmpfile); err == nil { - // deletes the local temp app spec file - err = os.Remove(tmpfile) - if err != nil { - log.Fatalf("deleting local temp app spec file: %s", err) - } - } - }() - - //updates app spec of the app using the local temp file and update - err = a.client.UpdateAppPlatformAppSpec(tmpfile, appID) - if err != nil { - return errors.Wrap(err, "updating app spec") - } - - //checks for deployment status - err = a.client.IsDeployed(appID) - if err != nil { - return errors.Wrap(err, "checking deployment status") - } - - return nil -} - -//updateLocalAppSpec updates app spec based on users input and saves 
it in a local file called .do._app.yaml -func (a *action) updateLocalAppSpec(input []parser_struct.UpdatedRepo, appSpec *godo.AppSpec) (string, error) { - //updates all the container images based on user input - newErr := filterApps(input, *appSpec) - if newErr.name != "" { - log.Print(newErr.name) - if len(newErr.notFound) != 0 { - log.Fatalf("%v", newErr.notFound) - } - return "", errors.New(newErr.name) - } - - //write to local temp file - tmpfile, err := writeToTempFile(appSpec) - if err != nil { - return "", err - } - return tmpfile, nil -} - -//writeToTempFile writes to a local temp file -func writeToTempFile(appSpec *godo.AppSpec) (string, error) { - //parse App Spec to yaml - newYaml, err := parser.ParseAppSpecToYaml(appSpec) - if err != nil { - return "", err - } - tmpfile, err := ioutil.TempFile("", "_do_app_*.yaml") - if err != nil { - return "", errors.Wrap(err, "Error in creating temp file") - } - if _, err := tmpfile.Write(newYaml); err != nil { - tmpfile.Close() - return "", errors.Wrap(err, "Error in writing to temp file") - } - if err := tmpfile.Close(); err != nil { - return "", errors.Wrap(err, "Error in closing temp file") - } - return tmpfile.Name(), nil -} - -// checkForGitAndDockerHub removes git, gitlab, github, DockerHub and DOCR images for the app name specified in the input json file -func checkForGitAndDockerHub(allFiles []parser_struct.UpdatedRepo, spec *godo.AppSpec) { - //iterate through all the files of the input and save names in a map - var nameMap = make(map[string]bool) - for val := range allFiles { - nameMap[allFiles[val].Name] = true - } - - //remove git, gitlab, github and dockerhub spec of services with unique name declared in input - for _, service := range spec.Services { - if !nameMap[service.Name] { - continue - } - service.Git = nil - service.GitLab = nil - service.GitHub = nil - service.Image = nil - } - - //remove git, gitlab, github and dockerhub spec of workers with unique name declared in input - for _, worker := 
range spec.Workers { - if !nameMap[worker.Name] { - continue - } - worker.Git = nil - worker.GitLab = nil - worker.GitHub = nil - worker.Image = nil - } - - //remove git, gitlab, github and dockerhub spec of Jobs with unique name declared in input - for _, job := range spec.Jobs { - if !nameMap[job.Name] { - continue - } - job.Git = nil - job.GitLab = nil - job.GitHub = nil - job.Image = nil - } -} - -func makeImageSpec(updatedRepo parser_struct.UpdatedRepo) *godo.ImageSourceSpec { - - if updatedRepo.Image.RegistryType == "" { - fmt.Println("::warning::Updating images without an ImageSourceSpec is deprecated. Please See: https://github.com/digitalocean/app_action/issues/10") - repos := strings.Split(updatedRepo.Repository, `/`) - repo := repos[len(repos)-1] - return &godo.ImageSourceSpec{ - RegistryType: "DOCR", - Repository: repo, - Tag: updatedRepo.Tag, - } - } - return &updatedRepo.Image -} - -// filterApps filters git and DockerHub apps and then updates app spec with new ImageSourceSpec -func filterApps(allFiles []parser_struct.UpdatedRepo, appSpec godo.AppSpec) AllError { - //remove all gitlab,github, git and dockerhub app info from appSpec for provided unique name component in input - checkForGitAndDockerHub(allFiles, &appSpec) - - //iterate through all the files of the input and save names in a map - var nameMap = make(map[string]bool) - for val := range allFiles { - nameMap[allFiles[val].Name] = true - } - - //iterate through all services, worker and job to update AppSpec.ImageSourceSpec based on component name declared in input - for key := range allFiles { - for _, service := range appSpec.Services { - if service.Name != allFiles[key].Name { - continue - } - service.Image = makeImageSpec(allFiles[key]) - delete(nameMap, service.Name) - } - for _, worker := range appSpec.Workers { - if worker.Name != allFiles[key].Name { - continue - } - - worker.Image = makeImageSpec(allFiles[key]) - delete(nameMap, worker.Name) - } - for _, job := range appSpec.Jobs { - 
if job.Name != allFiles[key].Name { - continue - } - - job.Image = makeImageSpec(allFiles[key]) - delete(nameMap, job.Name) - } - - //if functions component unique name is mentioned in the user input throw error as functions components do not support containers - for _, functions := range appSpec.Functions { - if functions.Name != allFiles[key].Name { - continue - } - - return AllError{ - name: fmt.Sprintf("Functions components in App Platform do not support containers: %s", functions.Name), - } - } - //if static sites unique name is mentioned in the user input throw error as static sites do not support containers - for _, static := range appSpec.StaticSites { - if static.Name != allFiles[key].Name { - continue - } - - return AllError{ - name: fmt.Sprintf("Static sites in App Platform do not support containers: %s", static.Name), - } - } - } - - if len(nameMap) == 0 { - return AllError{} - } - - keys := make([]string, 0, len(nameMap)) - - for k := range nameMap { - keys = append(keys, k) - } - - return AllError{ - name: "all components with following names were not found in your deployed app spec", - notFound: keys, - } -} diff --git a/main_test.go b/main_test.go deleted file mode 100644 index dfcd53e..0000000 --- a/main_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package main - -import ( - "bytes" - "io/ioutil" - "log" - "os" - "testing" - - "github.com/digitalocean/app_action/internal/parser" - "github.com/digitalocean/godo" - gomock "github.com/golang/mock/gomock" - "gopkg.in/yaml.v2" -) - -//TestParseJsonInput uses custom input to check if the parseJsonInput function is working properly -func TestParseJsonInput(t *testing.T) { - temp := `[ { - "name": "frontend", - "repository": "registry.digitalocean.com//", - "tag": "latest" - }]` - allRepos, err := parser.ParseJsonInput(temp) - if err != nil { - t.Errorf("Error in parsing input json data") - } - if allRepos[0].Name != "frontend" || - allRepos[0].Repository != "registry.digitalocean.com//" || - allRepos[0].Tag 
!= "latest" { - t.Errorf("Error in unmarshal") - } -} - -//TestCheckForGitAndDockerHub uses custom input to check if the checkForGitAndDockerHub is working -func TestCheckForGitAndDockerHub(t *testing.T) { - //sample-golang is app spec used for testing purposes - testInput, err := ioutil.ReadFile("testdata/sample-golang.yaml") - if err != nil { - t.Errorf("error in reading test file") - } - var app godo.AppSpec - err = yaml.Unmarshal(testInput, &app) - if err != nil { - t.Errorf("Error in unmarshalling test yaml") - } - if app.Services[0].Name == "web" && app.Services[0].Git.RepoCloneURL == "https://github.com/snormore/sample-golang.git" { - t.Errorf("Error in parsing test data") - } - temp := `[ { - "name": "web", - "repository": "registry.digitalocean.com//", - "tag": "latest" - }]` - allRepos, err := parser.ParseJsonInput(temp) - if err != nil { - t.Errorf(err.Error()) - } - if allRepos[0].Name != "web" || - allRepos[0].Repository != "registry.digitalocean.com//" || - allRepos[0].Tag != "latest" { - t.Errorf("error in unmarshalling input data") - } - - //check for git,github,gitlab,DOCR and dockerhub removal for app name provided in user input - checkForGitAndDockerHub(allRepos, &app) - if app.Services[0].Name == "web" && app.Services[0].Git != nil { - - t.Errorf("error in checkForGitAndDockerHub") - } - -} - -//TestFilterApps tests filterApps function using testdata/sample-golang.yaml as input -func TestFilterApps(t *testing.T) { - //sample-golang is app spec used for testing purposes - testInput, err := ioutil.ReadFile("testdata/sample-golang.yaml") - if err != nil { - t.Errorf("error in reading test file") - } - var app godo.AppSpec - err = yaml.Unmarshal(testInput, &app) - if err != nil { - t.Errorf("Error in unmarshalling test yaml") - } - if app.Services[0].Name == "web" && app.Services[0].Git.RepoCloneURL == "https://github.com/snormore/sample-golang.git" { - t.Errorf("Error in parsing test data") - } - temp := `[ { - "name": "web", - "repository": 
"registry.digitalocean.com//", - "tag": "latest" - }]` - - //paseJsonInput function is used to parse the input json data - allRepos, err := parser.ParseJsonInput(temp) - if err != nil { - t.Errorf(err.Error()) - } - if allRepos[0].Name != "web" || - allRepos[0].Repository != "registry.digitalocean.com//" || - allRepos[0].Tag != "latest" { - t.Errorf("error in unmarshalling input data") - } - - //filterApps function is used to filter the app spec based on the app name provided in user input - aErr := filterApps(allRepos, app) - if aErr.name != "" { - t.Errorf(aErr.name) - } - if app.Services[0].Image.RegistryType != "DOCR" || - app.Services[0].Image.Repository != "" || - app.Services[0].Image.Tag != "latest" { - t.Errorf("error in filterApps") - } -} - -//TestUpdateLocalAppSpec tests all the non doctl dependent functions -func TestUpdateLocalAppSpec(t *testing.T) { - t1Input := `[{ - "name": "web", - "repository": "registry.digitalocean.com/sample-go/add_sample", - "tag": "latest" - } - ]` - - //temp is the deployment spec scraped from actual deployment used for testing purposes - testInput, err := ioutil.ReadFile("testdata/temp") - if err != nil { - t.Errorf("error in reading test file") - } - - a := &action{ - appName: "sample-golang", - images: t1Input, - } - - allRepos, err := parser.ParseJsonInput(t1Input) - if err != nil { - t.Errorf(err.Error()) - } - //parseDeploymentSpec - appSpec, err := parser.ParseDeploymentSpec(testInput) - if err != nil { - t.Errorf(err.Error()) - } - - //test for all functions which are independent of doctl - file, err := a.updateLocalAppSpec(allRepos, appSpec[0].Spec) - if err != nil { - t.Errorf(err.Error()) - } - f1, err1 := ioutil.ReadFile(file) - if err1 != nil { - log.Fatal(err1) - } - - //read updatedAppSpec.yaml to compare the final output with expected output - f2, err2 := ioutil.ReadFile("testdata/updatedAppSpec.yaml") - if err2 != nil { - log.Fatal(err2) - } - if bytes.Equal(f1, f2) == false { - t.Errorf("error in parsing 
app spec yaml file") - } - os.Remove(file) -} - -func Test_run(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - appID := "3a91c9e3-253f-4c75-99e5-b81b9c3f744f" - activeDeploymentID := "fac38395-30f3-4c59-9e6c-3a67523f51de" - sampleImages := `[{ - "name": "web", - "repository": "registry.digitalocean.com/sample-go/add_sample", - "tag": "3.2.1" - } - ]` - - //parse input data - sampleImagesRepo, err := parser.ParseJsonInput(sampleImages) - if err != nil { - t.Errorf(err.Error()) - } - - do := NewMockDoctlClient(ctrl) - do.EXPECT().RetrieveAppID(gomock.Eq("sample-golang")).Return(appID, nil) - do.EXPECT().RetrieveActiveDeploymentID(gomock.Eq(appID)).Return(activeDeploymentID, nil) - //temp is the deployment spec scraped from actual deployment used for testing purposes - testInput, err := ioutil.ReadFile("testdata/temp") - if err != nil { - t.Errorf("error in reading test file") - } - //parse testInput data - deployments, err := parser.ParseDeploymentSpec(testInput) - if err != nil { - t.Errorf(err.Error()) - } - do.EXPECT().RetrieveActiveDeployment(gomock.Eq(activeDeploymentID), gomock.Eq(appID), gomock.Eq(sampleImages)).Return(sampleImagesRepo, deployments[0].Spec, nil) - do.EXPECT().UpdateAppPlatformAppSpec(gomock.Any(), appID).Return(nil) - - do.EXPECT().IsDeployed(appID).Return(nil) - - a := &action{ - appName: "sample-golang", - images: sampleImages, - client: do, - } - - err = a.run() - if err != nil { - t.Fail() - } -} - -func Test_run_with_ImageSourceSpec(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - appID := "2a91c9e3-253f-4c75-99e5-b81b9c3f744f" - activeDeploymentID := "fac38395-30f3-4c59-9e6c-3a67523f51de" - sampleImages := `[{ - "name": "web", - "image":{ - "registry_type": "DOCR", - "repository": "sample-go/add_sample", - "tag": "3.20.2" - } - } - ]` - - //parse input data - sampleImagesRepo, err := parser.ParseJsonInput(sampleImages) - if err != nil { - t.Errorf(err.Error()) - } - - do := 
NewMockDoctlClient(ctrl) - do.EXPECT().RetrieveAppID(gomock.Eq("sample-golang")).Return(appID, nil) - do.EXPECT().RetrieveActiveDeploymentID(gomock.Eq(appID)).Return(activeDeploymentID, nil) - //temp is the deployment spec scraped from actual deployment used for testing purposes - testInput, err := ioutil.ReadFile("testdata/temp") - if err != nil { - t.Errorf("error in reading test file") - } - //parse testInput data - deployments, err := parser.ParseDeploymentSpec(testInput) - if err != nil { - t.Errorf(err.Error()) - } - do.EXPECT().RetrieveActiveDeployment(gomock.Eq(activeDeploymentID), gomock.Eq(appID), gomock.Eq(sampleImages)).Return(sampleImagesRepo, deployments[0].Spec, nil) - do.EXPECT().UpdateAppPlatformAppSpec(gomock.Any(), appID).Return(nil) - do.EXPECT().IsDeployed(appID).Return(nil) - - a := &action{ - appName: "sample-golang", - images: sampleImages, - client: do, - } - - err = a.run() - if err != nil { - t.Fail() - } -} diff --git a/mock.go b/mock.go deleted file mode 100644 index 4f97130..0000000 --- a/mock.go +++ /dev/null @@ -1,167 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: main.go - -// Package main is a generated GoMock package. 
-package main - -import ( - parser_struct "github.com/digitalocean/app_action/internal/parser_struct" - godo "github.com/digitalocean/godo" - gomock "github.com/golang/mock/gomock" - reflect "reflect" -) - -// MockDoctlClient is a mock of DoctlClient interface -type MockDoctlClient struct { - ctrl *gomock.Controller - recorder *MockDoctlClientMockRecorder -} - -// MockDoctlClientMockRecorder is the mock recorder for MockDoctlClient -type MockDoctlClientMockRecorder struct { - mock *MockDoctlClient -} - -// NewMockDoctlClient creates a new mock instance -func NewMockDoctlClient(ctrl *gomock.Controller) *MockDoctlClient { - mock := &MockDoctlClient{ctrl: ctrl} - mock.recorder = &MockDoctlClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockDoctlClient) EXPECT() *MockDoctlClientMockRecorder { - return m.recorder -} - -// ListDeployments mocks base method -func (m *MockDoctlClient) ListDeployments(appID string) ([]godo.Deployment, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListDeployments", appID) - ret0, _ := ret[0].([]godo.Deployment) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListDeployments indicates an expected call of ListDeployments -func (mr *MockDoctlClientMockRecorder) ListDeployments(appID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDeployments", reflect.TypeOf((*MockDoctlClient)(nil).ListDeployments), appID) -} - -// RetrieveActiveDeploymentID mocks base method -func (m *MockDoctlClient) RetrieveActiveDeploymentID(appID string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RetrieveActiveDeploymentID", appID) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RetrieveActiveDeploymentID indicates an expected call of RetrieveActiveDeploymentID -func (mr *MockDoctlClientMockRecorder) RetrieveActiveDeploymentID(appID interface{}) 
*gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetrieveActiveDeploymentID", reflect.TypeOf((*MockDoctlClient)(nil).RetrieveActiveDeploymentID), appID) -} - -// RetrieveActiveDeployment mocks base method -func (m *MockDoctlClient) RetrieveActiveDeployment(deploymentID, appID, input string) ([]parser_struct.UpdatedRepo, *godo.AppSpec, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RetrieveActiveDeployment", deploymentID, appID, input) - ret0, _ := ret[0].([]parser_struct.UpdatedRepo) - ret1, _ := ret[1].(*godo.AppSpec) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// RetrieveActiveDeployment indicates an expected call of RetrieveActiveDeployment -func (mr *MockDoctlClientMockRecorder) RetrieveActiveDeployment(deploymentID, appID, input interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetrieveActiveDeployment", reflect.TypeOf((*MockDoctlClient)(nil).RetrieveActiveDeployment), deploymentID, appID, input) -} - -// UpdateAppPlatformAppSpec mocks base method -func (m *MockDoctlClient) UpdateAppPlatformAppSpec(tmpfile, appID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateAppPlatformAppSpec", tmpfile, appID) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateAppPlatformAppSpec indicates an expected call of UpdateAppPlatformAppSpec -func (mr *MockDoctlClientMockRecorder) UpdateAppPlatformAppSpec(tmpfile, appID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAppPlatformAppSpec", reflect.TypeOf((*MockDoctlClient)(nil).UpdateAppPlatformAppSpec), tmpfile, appID) -} - -// CreateDeployments mocks base method -func (m *MockDoctlClient) CreateDeployments(appID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateDeployments", appID) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreateDeployments indicates an expected call of 
CreateDeployments -func (mr *MockDoctlClientMockRecorder) CreateDeployments(appID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateDeployments", reflect.TypeOf((*MockDoctlClient)(nil).CreateDeployments), appID) -} - -// RetrieveFromDigitalocean mocks base method -func (m *MockDoctlClient) RetrieveFromDigitalocean() ([]godo.App, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RetrieveFromDigitalocean") - ret0, _ := ret[0].([]godo.App) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RetrieveFromDigitalocean indicates an expected call of RetrieveFromDigitalocean -func (mr *MockDoctlClientMockRecorder) RetrieveFromDigitalocean() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetrieveFromDigitalocean", reflect.TypeOf((*MockDoctlClient)(nil).RetrieveFromDigitalocean)) -} - -// RetrieveAppID mocks base method -func (m *MockDoctlClient) RetrieveAppID(appName string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RetrieveAppID", appName) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RetrieveAppID indicates an expected call of RetrieveAppID -func (mr *MockDoctlClientMockRecorder) RetrieveAppID(appName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetrieveAppID", reflect.TypeOf((*MockDoctlClient)(nil).RetrieveAppID), appName) -} - -// IsDeployed mocks base method -func (m *MockDoctlClient) IsDeployed(appID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsDeployed", appID) - ret0, _ := ret[0].(error) - return ret0 -} - -// IsDeployed indicates an expected call of IsDeployed -func (mr *MockDoctlClientMockRecorder) IsDeployed(appID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDeployed", reflect.TypeOf((*MockDoctlClient)(nil).IsDeployed), appID) -} 
- -// Deploy mocks base method -func (m *MockDoctlClient) Deploy(input, appName string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Deploy", input, appName) - ret0, _ := ret[0].(error) - return ret0 -} - -// Deploy indicates an expected call of Deploy -func (mr *MockDoctlClientMockRecorder) Deploy(input, appName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deploy", reflect.TypeOf((*MockDoctlClient)(nil).Deploy), input, appName) -} diff --git a/testdata/sample-golang.yaml b/testdata/sample-golang.yaml deleted file mode 100644 index 6313294..0000000 --- a/testdata/sample-golang.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: sample-golang -region: nyc -services: -- git: - branch: main - repo_clone_url: https://github.com/snormore/sample-golang.git - http_port: 8080 - instance_count: 1 - instance_size_slug: basic-xxs - name: web - routes: - - path: / diff --git a/testdata/temp b/testdata/temp deleted file mode 100644 index 0ad919a..0000000 --- a/testdata/temp +++ /dev/null @@ -1,131 +0,0 @@ -[ - { - "id": "8801ffe3-8afe-4d5e-a700-c2f737b81312", - "spec": { - "name": "sample-golang", - "services": [ - { - "name": "web", - "image": { - "registry_type": "DOCR", - "repository": "add_sample", - "tag": "latest" - }, - "instance_size_slug": "basic-xxs", - "instance_count": 1, - "http_port": 8080, - "routes": [ - { - "path": "/" - } - ] - } - ], - "region": "nyc" - }, - "services": [ - { - "name": "web" - } - ], - "phase_last_updated_at": "2021-07-21T21:49:07Z", - "created_at": "2021-07-21T21:48:19Z", - "updated_at": "2021-07-21T21:49:07Z", - "cause": "manual", - "progress": { - "success_steps": 6, - "total_steps": 6, - "steps": [ - { - "name": "build", - "status": "SUCCESS", - "steps": [ - { - "name": "initialize", - "status": "SUCCESS", - "started_at": "2021-07-21T21:48:35.512775397Z", - "ended_at": "2021-07-21T21:48:35.582262631Z" - }, - { - "name": "components", - "status": "SUCCESS", - "steps": [ - { - 
"name": "web", - "status": "SUCCESS", - "started_at": "0001-01-01T00:00:00Z", - "ended_at": "0001-01-01T00:00:00Z", - "reason": { - "code": "PreviousBuildReused", - "message": "Your previous build was reused." - }, - "component_name": "web", - "message_base": "Building service" - } - ], - "started_at": "2021-07-21T21:48:35.582287555Z", - "ended_at": "2021-07-21T21:48:35.582770619Z" - } - ], - "started_at": "2021-07-21T21:48:35.512747237Z", - "ended_at": "2021-07-21T21:48:35.585305076Z" - }, - { - "name": "deploy", - "status": "SUCCESS", - "steps": [ - { - "name": "initialize", - "status": "SUCCESS", - "started_at": "2021-07-21T21:48:42.969073936Z", - "ended_at": "2021-07-21T21:48:43.234490888Z" - }, - { - "name": "components", - "status": "SUCCESS", - "steps": [ - { - "name": "web", - "status": "SUCCESS", - "steps": [ - { - "name": "deploy", - "status": "SUCCESS", - "started_at": "0001-01-01T00:00:00Z", - "ended_at": "0001-01-01T00:00:00Z", - "component_name": "web", - "message_base": "Deploying service" - }, - { - "name": "wait", - "status": "SUCCESS", - "started_at": "0001-01-01T00:00:00Z", - "ended_at": "0001-01-01T00:00:00Z", - "component_name": "web", - "message_base": "Waiting for service" - } - ], - "started_at": "0001-01-01T00:00:00Z", - "ended_at": "0001-01-01T00:00:00Z", - "component_name": "web" - } - ], - "started_at": "2021-07-21T21:48:43.234527628Z", - "ended_at": "2021-07-21T21:49:04.971628049Z" - }, - { - "name": "finalize", - "status": "SUCCESS", - "started_at": "2021-07-21T21:49:05.016891179Z", - "ended_at": "2021-07-21T21:49:07.146845266Z" - } - ], - "started_at": "2021-07-21T21:48:42.969045812Z", - "ended_at": "2021-07-21T21:49:07.146946671Z" - } - ] - }, - "phase": "ACTIVE", - "tier_slug": "basic" - } -] diff --git a/testdata/updatedAppSpec.yaml b/testdata/updatedAppSpec.yaml deleted file mode 100644 index b50504d..0000000 --- a/testdata/updatedAppSpec.yaml +++ /dev/null @@ -1,13 +0,0 @@ -name: sample-golang -region: nyc -services: -- 
http_port: 8080 - image: - registry_type: DOCR - repository: add_sample - tag: latest - instance_count: 1 - instance_size_slug: basic-xxs - name: web - routes: - - path: / diff --git a/utils/apps.go b/utils/apps.go new file mode 100644 index 0000000..53c7ece --- /dev/null +++ b/utils/apps.go @@ -0,0 +1,38 @@ +package utils + +import ( + "context" + "fmt" + + "github.com/digitalocean/godo" +) + +// FindAppByName returns the app with the given name, or nil if it does not exist. +func FindAppByName(ctx context.Context, ap godo.AppsService, name string) (*godo.App, error) { + opt := &godo.ListOptions{} + for { + apps, resp, err := ap.List(ctx, opt) + if err != nil { + return nil, fmt.Errorf("failed to list apps: %w", err) + } + + for _, a := range apps { + if a.GetSpec().GetName() == name { + return a, nil + } + } + + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + + page, err := resp.Links.CurrentPage() + if err != nil { + return nil, fmt.Errorf("failed to get current page: %w", err) + } + + // set the page we want for the next request + opt.Page = page + 1 + } + return nil, nil +} diff --git a/utils/apps_test.go b/utils/apps_test.go new file mode 100644 index 0000000..c4aee77 --- /dev/null +++ b/utils/apps_test.go @@ -0,0 +1,48 @@ +package utils + +import ( + "context" + "errors" + "testing" + + "github.com/digitalocean/godo" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFindAppByName(t *testing.T) { + app1 := &godo.App{Spec: &godo.AppSpec{Name: "app1"}} + app2 := &godo.App{Spec: &godo.AppSpec{Name: "app2"}} + + as := &mockedAppsService{} + as.On("List", mock.Anything, &godo.ListOptions{Page: 0}).Return([]*godo.App{app1}, &godo.Response{Links: &godo.Links{Pages: &godo.Pages{Next: "2"}}}, nil).Times(3) + as.On("List", mock.Anything, &godo.ListOptions{Page: 2}).Return([]*godo.App{app2}, &godo.Response{}, nil).Times(2) + + app, err := FindAppByName(context.Background(), as, "app1") + require.NoError(t, err) + 
require.Equal(t, app1, app) + + app, err = FindAppByName(context.Background(), as, "app2") + require.NoError(t, err) + require.Equal(t, app2, app) + + app, err = FindAppByName(context.Background(), as, "app3") + require.NoError(t, err) + require.Nil(t, app) + + as.On("List", mock.Anything, mock.Anything).Return([]*godo.App{}, &godo.Response{}, errors.New("an error")).Once() + _, err = FindAppByName(context.Background(), as, "app4") + require.Error(t, err) + + as.AssertExpectations(t) +} + +type mockedAppsService struct { + godo.AppsService + mock.Mock +} + +func (m *mockedAppsService) List(ctx context.Context, opt *godo.ListOptions) ([]*godo.App, *godo.Response, error) { + args := m.Called(ctx, opt) + return args.Get(0).([]*godo.App), args.Get(1).(*godo.Response), args.Error(2) +} diff --git a/utils/inputs.go b/utils/inputs.go new file mode 100644 index 0000000..b5ae492 --- /dev/null +++ b/utils/inputs.go @@ -0,0 +1,38 @@ +package utils + +import ( + "fmt" + "strconv" + + gha "github.com/sethvargo/go-githubactions" +) + +// InputAsString parses the input as a string and sets the target. +func InputAsString(a *gha.Action, input string, required bool, target *string) error { + str := a.GetInput(input) + if str == "" && required { + return fmt.Errorf("input %q is required", input) + } + *target = str + return nil +} + +// InputAsBool parses the input as a boolean and sets the target. +func InputAsBool(a *gha.Action, input string, required bool, target *bool) error { + str := a.GetInput(input) + if str == "" { + if required { + return fmt.Errorf("input %q is required", input) + } + + // If the input is not required, we default to false. 
+ *target = false + return nil + } + val, err := strconv.ParseBool(str) + if err != nil { + return fmt.Errorf("failed to parse %q as a boolean: %v", input, err) + } + *target = val + return nil +} diff --git a/utils/inputs_test.go b/utils/inputs_test.go new file mode 100644 index 0000000..1ee74ec --- /dev/null +++ b/utils/inputs_test.go @@ -0,0 +1,113 @@ +package utils + +import ( + "testing" + + gha "github.com/sethvargo/go-githubactions" + "github.com/stretchr/testify/require" +) + +func TestInputAsString(t *testing.T) { + tests := []struct { + name string + input string + required bool + expected string + err bool + }{{ + name: "success", + input: "input", + required: true, + expected: "value", + }, { + name: "required", + input: "empty", + required: true, + err: true, + }, { + name: "optional", + input: "empty", + required: false, + expected: "", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + a := gha.New(gha.WithGetenv(func(k string) string { + switch k { + case "INPUT_INPUT": + return "value" + case "INPUT_EMPTY": + return "" + default: + return "unexpected" + } + })) + target := new(string) + err := InputAsString(a, test.input, test.required, target) + if err != nil && !test.err { + require.NoError(t, err) + } + if err == nil && test.err { + require.Error(t, err) + } + require.Equal(t, test.expected, *target) + }) + } +} + +func TestInputAsBool(t *testing.T) { + tests := []struct { + name string + input string + required bool + expected bool + err bool + }{{ + name: "success", + input: "input", + required: true, + expected: true, + }, { + name: "required", + input: "empty", + required: true, + err: true, + }, { + name: "optional", + input: "empty", + required: false, + expected: false, + }, { + name: "invalid", + input: "invalid", + required: true, + err: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + a := gha.New(gha.WithGetenv(func(k string) string { + switch k { + case "INPUT_INPUT": 
+ return "true" + case "INPUT_EMPTY": + return "" + case "INPUT_INVALID": + return "invalid" + default: + return "unexpected" + } + })) + target := new(bool) + err := InputAsBool(a, test.input, test.required, target) + if err != nil && !test.err { + require.NoError(t, err) + } + if err == nil && test.err { + require.Error(t, err) + } + require.Equal(t, test.expected, *target) + }) + } +} diff --git a/utils/preview.go b/utils/preview.go new file mode 100644 index 0000000..b35b855 --- /dev/null +++ b/utils/preview.go @@ -0,0 +1,71 @@ +package utils + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + + "github.com/digitalocean/godo" + gha "github.com/sethvargo/go-githubactions" +) + +// SanitizeSpecForPullRequestPreview modifies the given AppSpec to be suitable for a pull request preview. +// This includes: +// - Setting a unique app name. +// - Unsetting any domains. +// - Unsetting any alerts. +// - Setting the reference of all relevant components to point to the PRs ref. +func SanitizeSpecForPullRequestPreview(spec *godo.AppSpec, ghCtx *gha.GitHubContext) error { + repoOwner, repo := ghCtx.Repo() + + // Override app name to something that identifies this PR. + spec.Name = GenerateAppName(repoOwner, repo, ghCtx.RefName) + + // Unset any domains as those might collide with production apps. + spec.Domains = nil + + // Unset any alerts as those will be delivered wrongly anyway. + spec.Alerts = nil + + // Override the reference of all relevant components to point to the PRs ref. + if err := godo.ForEachAppSpecComponent(spec, func(c godo.AppBuildableComponentSpec) error { + // TODO: Should this also deal with raw Git sources? + ref := c.GetGitHub() + if ref == nil || ref.Repo != fmt.Sprintf("%s/%s", repoOwner, repo) { + // Skip Github refs pointing to other repos. + return nil + } + // We manually kick new deployments so we can watch their status better. 
+ ref.DeployOnPush = false + ref.Branch = ghCtx.HeadRef + return nil + }); err != nil { + return fmt.Errorf("failed to sanitize buildable components: %w", err) + } + return nil +} + +// GenerateAppName generates a unique app name based on the repoOwner, repo, and ref. +func GenerateAppName(repoOwner, repo, ref string) string { + baseName := fmt.Sprintf("%s-%s-%s", repoOwner, repo, ref) + baseName = strings.ToLower(baseName) + baseName = strings.NewReplacer( + "/", "-", // Replace slashes. + ":", "", // Colons are illegal. + "_", "-", // Underscores are illegal. + ).Replace(baseName) + + // Generate a hash from the unique enumeration of repoOwner, repo, and ref. + hasher := sha256.New() + hasher.Write([]byte(baseName)) + suffix := "-" + hex.EncodeToString(hasher.Sum(nil))[:8] + + // App names must be at most 32 characters. + limit := 32 - len(suffix) + if len(baseName) < limit { + limit = len(baseName) + } + + return baseName[:limit] + suffix +} diff --git a/utils/preview_test.go b/utils/preview_test.go new file mode 100644 index 0000000..0a3b8ad --- /dev/null +++ b/utils/preview_test.go @@ -0,0 +1,146 @@ +package utils + +import ( + "testing" + + "github.com/digitalocean/godo" + gha "github.com/sethvargo/go-githubactions" + "github.com/stretchr/testify/require" +) + +func TestSanitizeSpecForPullRequestPreview(t *testing.T) { + spec := &godo.AppSpec{ + Name: "foo", + Domains: []*godo.AppDomainSpec{{Domain: "foo.com"}}, + Alerts: []*godo.AppAlertSpec{{Value: 80}}, + Services: []*godo.AppServiceSpec{{ + Name: "web", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "main", + DeployOnPush: true, + }, + }, { + Name: "web2", + GitHub: &godo.GitHubSourceSpec{ + Repo: "another/repo", + Branch: "main", + DeployOnPush: true, + }, + }}, + Workers: []*godo.AppWorkerSpec{{ + Name: "worker", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "main", + DeployOnPush: true, + }, + }}, + Jobs: []*godo.AppJobSpec{{ + Name: "job", + GitHub: 
&godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "main", + DeployOnPush: true, + }, + }}, + Functions: []*godo.AppFunctionsSpec{{ + Name: "function", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "main", + DeployOnPush: true, + }, + }}, + } + + ghCtx := &gha.GitHubContext{ + Repository: "foo/bar", + RefName: "3/merge", + HeadRef: "feature-branch", + } + + err := SanitizeSpecForPullRequestPreview(spec, ghCtx) + require.NoError(t, err) + + expected := &godo.AppSpec{ + Name: "foo-bar-3-merge-adb46530", // Name got generated. + // Domains and alerts got removed. + Services: []*godo.AppServiceSpec{{ + Name: "web", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "feature-branch", // Branch got updated. + DeployOnPush: false, // DeployOnPush got set to false. + }, + }, { + Name: "web2", + GitHub: &godo.GitHubSourceSpec{ + Repo: "another/repo", // No change. + Branch: "main", + DeployOnPush: true, + }, + }}, + Workers: []*godo.AppWorkerSpec{{ + Name: "worker", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "feature-branch", // Branch got updated. + DeployOnPush: false, // DeployOnPush got set to false. + }, + }}, + Jobs: []*godo.AppJobSpec{{ + Name: "job", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "feature-branch", // Branch got updated. + DeployOnPush: false, // DeployOnPush got set to false. + }, + }}, + Functions: []*godo.AppFunctionsSpec{{ + Name: "function", + GitHub: &godo.GitHubSourceSpec{ + Repo: "foo/bar", + Branch: "feature-branch", // Branch got updated. + DeployOnPush: false, // DeployOnPush got set to false. 
+ }, + }}, + } + + require.Equal(t, expected, spec) +} + +func TestGenerateAppName(t *testing.T) { + tests := []struct { + name string + repoOwner string + repo string + ref string + expected string + }{{ + name: "success", + repoOwner: "foo", + repo: "bar", + ref: "3/merge", + expected: "foo-bar-3-merge-adb46530", + }, { + name: "long repo owner", + repoOwner: "thisisanextremelylongrepohostname", + repo: "bar", + ref: "3/merge", + expected: "thisisanextremelylongre-92da974b", + }, { + name: "long repo", + repoOwner: "foo", + repo: "thisisanextremelylongreponame", + ref: "3/merge", + expected: "foo-thisisanextremelylo-67dbc40d", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GenerateAppName(test.repoOwner, test.repo, test.ref) + require.Equal(t, test.expected, got) + }) + } +} diff --git a/vendor/github.com/digitalocean/godo/.gitignore b/vendor/github.com/digitalocean/godo/.gitignore deleted file mode 100644 index 48b8bf9..0000000 --- a/vendor/github.com/digitalocean/godo/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vendor/ diff --git a/vendor/github.com/digitalocean/godo/.whitesource b/vendor/github.com/digitalocean/godo/.whitesource deleted file mode 100644 index 6b6a735..0000000 --- a/vendor/github.com/digitalocean/godo/.whitesource +++ /dev/null @@ -1,8 +0,0 @@ -{ - "checkRunSettings": { - "vulnerableCheckRunConclusionLevel": "failure" - }, - "issueSettings": { - "minSeverityLevel": "LOW" - } -} diff --git a/vendor/github.com/digitalocean/godo/1-click.go b/vendor/github.com/digitalocean/godo/1-click.go deleted file mode 100644 index 2e07cf6..0000000 --- a/vendor/github.com/digitalocean/godo/1-click.go +++ /dev/null @@ -1,81 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const oneClickBasePath = "v2/1-clicks" - -// OneClickService is an interface for interacting with 1-clicks with the -// DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/1-Click-Applications -type OneClickService interface { - List(context.Context, string) ([]*OneClick, *Response, error) - InstallKubernetes(context.Context, *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error) -} - -var _ OneClickService = &OneClickServiceOp{} - -// OneClickServiceOp interfaces with 1-click endpoints in the DigitalOcean API. -type OneClickServiceOp struct { - client *Client -} - -// OneClick is the structure of a 1-click -type OneClick struct { - Slug string `json:"slug"` - Type string `json:"type"` -} - -// OneClicksRoot is the root of the json payload that contains a list of 1-clicks -type OneClicksRoot struct { - List []*OneClick `json:"1_clicks"` -} - -// InstallKubernetesAppsRequest represents a request required to install 1-click kubernetes apps -type InstallKubernetesAppsRequest struct { - Slugs []string `json:"addon_slugs"` - ClusterUUID string `json:"cluster_uuid"` -} - -// InstallKubernetesAppsResponse is the response of a kubernetes 1-click install request -type InstallKubernetesAppsResponse struct { - Message string `json:"message"` -} - -// List returns a list of the available 1-click applications. 
-func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([]*OneClick, *Response, error) { - path := fmt.Sprintf(`%s?type=%s`, oneClickBasePath, oneClickType) - - req, err := ocs.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(OneClicksRoot) - resp, err := ocs.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.List, resp, nil -} - -// InstallKubernetes installs an addon on a kubernetes cluster -func (ocs *OneClickServiceOp) InstallKubernetes(ctx context.Context, install *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error) { - path := fmt.Sprintf(oneClickBasePath + "/kubernetes") - - req, err := ocs.client.NewRequest(ctx, http.MethodPost, path, install) - if err != nil { - return nil, nil, err - } - - responseMessage := new(InstallKubernetesAppsResponse) - resp, err := ocs.client.Do(ctx, req, responseMessage) - if err != nil { - return nil, resp, err - } - return responseMessage, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md deleted file mode 100644 index 21c2470..0000000 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ /dev/null @@ -1,732 +0,0 @@ -# Change Log - -## [v1.119.0] - 2024-07-24 - -- #704 - @ElanHasson - APPS-9133 - Add support for OPENSEARCH as a database engine option -- #703 - @dependabot[bot] - Bump github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.7 -- #699 - @ElanHasson - APPS-8790 Add support to App Platform Log Forwarding for an OpenSearch DBaaS cluster destination. - -## [v1.118.0] - 2024-06-04 - -**Note**: This release contains features in closed beta (#700). 
- -- #701 - @llDrLove - Rename control plane permission to control plane firewall -- #700 - @bbassingthwaite - Add ProxyProtocol to LoadBalancer HealthCheck - -## [v1.117.0] - 2024-06-04 - -- #696 - @llDrLove - Support specifying control plane firewall rules when creating or updating DOKS clusters -- #697 - @asaha2 - Add support for lb internal network type -- #695 - @ElanHasson - APPS-8732 - Update documentation on App Platform OpenSearch endpoint structure. -- #692 - @ElanHasson - APPS-8732 - Add OpenSearch as a Log Destination for App Platform. - -## [v1.116.0] - 2024-05-16 - -- #693 - @guptado - Introduce VPC peering methods - -## [v1.115.0] - 2024-05-08 - -- #688 - @asaha2 - load balancers: support glb active-passive fail-over settings, currently in closed beta - -## [v1.114.0] - 2024-04-12 - -- #686 - @greeshmapill - APPS-8386: Add comments to mark deprecation of unused instance size fields -- #685 - @jcodybaker - APPS-8711: container termination controls -- #682 - @dependabot[bot] - Bump golang.org/x/net from 0.17.0 to 0.23.0 - -## [v1.113.0] - 2024-04-12 - -- #679 - @bhardwajRahul - Enable ui_connection parameter for Opensearch -- #678 - @bhardwajRahul - Enable Opensearch option in Godo - -## [v1.112.0] - 2024-04-08 - -- #672 - @dependabot[bot] - Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 -- #675 - @bhardwajRahul - Add ListDatabaseEvents to Godo - -## [v1.111.0] - 2024-04-02 - -- #674 - @asaha2 - load balancers: introduce glb settings in godo, currently in closed beta - -## [v1.110.0] - 2024-03-14 - -- #667 - @dwilsondo - Include DBaaS metrics credential endpoint operations -- #670 - @guptado - [NETPROD-3583] Added name param in ListOption to get resource by name -- #671 - @greeshmapill - APPS-8383: Add deprecation intent and bandwidth allowance to app instance size spec - -## [v1.109.0] - 2024-02-09 - -- #668 - @greeshmapill - APPS-8315: Update app instance size spec -- #665 - @jcodybaker - APPS-8263: methods for managing App Platform dev DBs -- 
#663 - @dwilsondo - Include replica connection info on DBaaS clusters & DBaaS PG pools -- #662 - @ddatta-do - load balancer : add regional network as new LB type - -## [v1.108.0] - 2024-01-17 - -- #660 - @dweinshenker - Enable CRUD operations for replicas with storage_size_mib - -## [v1.107.0] - 2023-12-07 - -- #658 - @markusthoemmes - APPS-8033 Add the RUN_RESTARTED log type -- #656 - @dweinshenker - Enhancement: add database user update -- #657 - @markusthoemmes - apps: Add registry_credentials field, GHCR registry type and the egress spec - -## [v1.106.0] - 2023-11-14 - -- #654 - @dweinshenker - Remove unclean_leader_election_enable for topic configuration - -## [v1.105.1] - 2023-11-07 - -- #652 - @andrewsomething - Retry on HTTP/2 internal errors. -- #648 - @alexandear - test: use fmt.Fprintf instead of fmt.Fprintf(fmt.Sprintf(...)) -- #651 - @alexandear - test: Replace deprecated io/ioutil with io -- #647 - @alexandear - test: add missing error check - -## [v1.105.0] - 2023-10-16 - -- #643 - @dweinshenker - Add support for scalable storage on database clusters -- #641 - @dweinshenker - Fix Kafka Partition Count -- #645 - @gregmankes - APPS-7325 - update app godo spec -- #642 - @dependabot[bot] - Bump golang.org/x/net from 0.7.0 to 0.17.0 - -## [v1.104.1] - 2023-10-10 - -* #640 - @andrewsomething - Drop required Go version to 1.20 and document policy. -* #640 - @andrewsomething - Fix library version. 
- -## [v1.104.0] - 2023-10-10 - -- #637 - @mikesmithgh - chore: change uptime alert comparison type -- #638 - @markusthoemmes - APPS-7700 Add ability to specify digest for an image - -## [v1.103.0] - 2023-10-03 - -- #635 - @andrewsomething - Bump github.com/stretchr/testify to v1.8.4 -- #634 - @andrewsomething - Bump Go version to v1.21.0 -- #632 - @danaelhe - Make Retrys by Default for NewFromToken() -- #633 - @dwilsondo - Add DBaaS engine Kafka -- #621 - @testwill - chore: use fmt.Fprintf instead of fmt.Fprint(fmt.Sprintf(...)) - -## [v1.102.1] - 2023-08-17 - -- #629 - @andrewsomething - Provide a custom retryablehttp.ErrorHandler for more consistent returns using retries. - -## [v1.102.0] - 2023-08-14 - -- #624 - @danaelhe - Update README.md with Retryable Info -- #626 - @andrewsomething - Allow configuring go-retryablehttp.Logger -- #625 - @andrewsomething - Export the HTTP client - -## [v1.101.0] - 2023-08-09 - -- #619 - @danaelhe - Add retryablehttp Client Option - -## [v1.100.0] - 2023-07-20 - -- #618 - @asaha - load balancers: introduce new type field -- #620 - @andrewsomething - account: add name field. - -## [v1.99.0] - 2023-04-24 - -- #616 - @bentranter - Bump CI version for Go 1.20 -- #615 - @bentranter - Remove beta support for tokens API -- #604 - @dvigueras - Add support for "Validate a Container Registry Name" -- #613 - @ibilalkayy - updated the README file by showing up the build status icon - -## [v1.98.0] - 2023-03-09 - -- #608 - @anitgandhi - client: don't process body upon 204 response -- #607 - @gregmankes - add apps rewrites/redirects to app spec - -## [v1.97.0] - 2023-02-10 - -- #601 - @jcodybaker - APPS-6813: update app platform - pending_deployment + timing -- #602 - @jcodybaker - Use App Platform active deployment for GetLogs if not specified - -## [v1.96.0] - 2023-01-23 - -- #599 - @markpaulson - Adding PromoteReplicaToPrimary to client interface. 
- -## [v1.95.0] - 2023-01-23 - -- #595 - @dweinshenker - Add UpgradeMajorVersion to godo - -## [v1.94.0] - 2022-01-23 - -- #596 - @DMW2151 - DBAAS-3906: Include updatePool for DB Clusters -- #593 - @danaelhe - Add Uptime Checks and Alerts Support - -## [v1.93.0] - 2022-12-15 - -- #591 - @andrewsomething - tokens: Add initial support for new API. - -## [v1.92.0] - 2022-12-14 - -- #589 - @wez470 - load-balancers: Minor doc fixup -- #585 - @StephenVarela - Add firewall support for load balancers -- #587 - @StephenVarela - Support new http alerts for load balancers -- #586 - @andrewsomething - godo.go: Sort service lists. -- #583 - @ddebarros - Adds support for functions trigger API - -## [v1.91.1] - 2022-11-23 - -- #582 - @StephenVarela - Load Balancers: Support new endpoints for http alerts - -## [v1.90.0] - 2022-11-16 - -- #571 - @kraai - Add WaitForAvailable -- #579 - @bentranter - Deprecate old pointer helpers, use generic one -- #580 - @StephenVarela - LBAAS Fixup default http idle timeout behaviour -- #578 - @StephenVarela - LBAAS-2430 Add support for HTTP idle timeout seconds -- #577 - @ddebarros - Functions api support - -## [v1.89.0] - 2022-11-02 - -- #575 - @ghostlandr - apps: add option to get projects data from Apps List endpoint - -## [v1.88.0] - 2022-10-31 - -- #573 - @kamaln7 - apps: add ListBuildpacks, UpgradeBuildpack -- #572 - @ghostlandr - Apps: add project id as a parameter to CreateApp and to the App struct -- #570 - @kraai - Fix copy-and-paste error in comment -- #568 - @StephenVarela - LBAAS-2321 Add project_id to load balancers structs - -## [v1.87.0] - 2022-10-12 - -- #564 - @DWizGuy58 - Add public monitoring alert policies for dbaas -- #565 - @dylanrhysscott - CON-5657 (Re-)expose public HA enablement flags in godo -- #563 - @andrewsomething - Add option to configure a rate.Limiter for the client. 
- -## [v1.86.0] - 2022-09-23 - -- #561 - @jonfriesen - apps: add docr image deploy on push - -## [v1.85.0] - 2022-09-21 - -- #560 - @andrewsomething - Bump golang.org/x/net (fixes: #557). -- #559 - @kamaln7 - apps: update component spec interfaces -- #555 - @kamaln7 - apps: add accessor methods and spec helpers -- #556 - @kamaln7 - update CI for go 1.18 & 1.19 - -## [v1.84.1] - 2022-09-16 - -- #554 - @andrewsomething - reserved IPs: project_id should have omitempty in create req. - -## [v1.84.0] - 2022-09-16 - -- #552 - @andrewsomething - reserved IPs: Expose project_id and locked attributes. -- #549 - @rpmoore - adding the replica id to the database replica model - -## [v1.83.0] - 2022-08-10 - -- #546 - @DWizGuy58 - Add support for database options - -## [v1.82.0] - 2022-08-04 - -- #544 - @andrewsomething - apps: Add URN() method. -- #542 - @andrewsomething - databases: Support advanced config endpoints. -- #543 - @nicktate - Ntate/detection models -- #541 - @andrewsomething - droplets: Support listing Droplets filtered by name. -- #540 - @bentranter - Update links to API documentation - -## [v1.81.0] - 2022-06-15 - -- #532 - @senorprogrammer - Add support for Reserved IP addresses -- #538 - @bentranter - util: update droplet create example -- #537 - @rpmoore - Adding project_id to databases -- #536 - @andrewsomething - account: Now may include info on current team. -- #535 - @ElanHasson - APPS-5636 Update App Platform for functions and Starter Tier App Proposals. 
- -## [v1.80.0] - 2022-05-23 - -- #533 - @ElanHasson - APPS-5636 - App Platform updates - -## [v1.79.0] - 2022-04-29 - -- #530 - @anitgandhi - monitoring: alerts for Load Balancers TLS conns/s utilization -- #529 - @ChiefMateStarbuck - Test against Go 1.18 -- #528 - @senorprogrammer - Remove DisablePublicNetworking option from the Create path -- #527 - @senorprogrammer - Remove the WithFloatingIPAddress create option - -## [v1.78.0] - 2022-03-31 - -- #522 - @jcodybaker - app platform: add support for features field - -## [v1.77.0] - 2022-03-16 - -- #518 - @rcj4747 - apps: Update apps protos - -## [v1.76.0] - 2022-03-09 - -- #516 - @CollinShoop - Add registry region support - -## [v1.75.0] - 2022-01-27 - -- #508 - @ElanHasson - Synchronize public protos and add multiple specs - -## [v1.74.0] - 2022-01-20 - -- #506 - @ZachEddy - Add new component type to apps-related structs - -## [v1.73.0] - 2021-12-03 - -- #501 - @CollinShoop - Add support for Registry ListManifests and ListRepositoriesV2 - -## [v1.72.0] - 2021-11-29 - -- #500 - @ElanHasson - APPS-4420: Add PreservePathPrefix to AppRouteSpec - -## [v1.71.0] - 2021-11-09 - -- #498 - @bojand - apps: update spec to include log destinations - -## [v1.70.0] - 2021-11-01 - -- #491 - @andrewsomething - Add support for retrieving Droplet monitoring metrics. 
-- #494 - @alexandear - Refactor tests: replace t.Errorf with assert/require -- #495 - @alexandear - Fix typos and grammar issues in comments -- #492 - @andrewsomething - Update golang.org/x/net -- #486 - @abeltay - Fix typo on "DigitalOcean" - -## [v1.69.1] - 2021-10-06 - -- #484 - @sunny-b - k8s/godo: remove ha field from update request - -## [v1.69.0] - 2021-10-04 - -- #482 - @dikshant - godo/load-balancers: add DisableLetsEncryptDNSRecords field for LBaaS - -## [v1.68.0] - 2021-09-29 - -- #480 - @sunny-b - kubernetes: add support for HA control plane - -## [v1.67.0] - 2021-09-22 - -- #478 - @sunny-b - kubernetes: add supported_features field to the kubernetes/options response -- #477 - @wez470 - Add size unit to LB API. - -## [v1.66.0] - 2021-09-21 - -- #473 - @andrewsomething - Add Go 1.17.x to test matrix and drop unsupported versions. -- #472 - @bsnyder788 - insights: add private (in/out)bound and public inbound bandwidth aler… -- #470 - @gottwald - domains: remove invalid json struct tag option - -## [v1.65.0] - 2021-08-05 - -- #468 - @notxarb - New alerts feature for App Platform -- #467 - @andrewsomething - docs: Update links to API documentation. -- #466 - @andrewsomething - Mark Response.Monitor as deprecated. - -## [v1.64.2] - 2021-07-23 - -- #464 - @bsnyder788 - insights: update HTTP method for alert policy update - -## [v1.64.1] - 2021-07-19 - -- #462 - @bsnyder788 - insights: fix alert policy update endpoint - -## [v1.64.0] - 2021-07-19 - -- #460 - @bsnyder788 - insights: add CRUD APIs for alert policies - -## [v1.63.0] - 2021-07-06 - -- #458 - @ZachEddy - apps: Add tail_lines query parameter to GetLogs function - -## [v1.62.0] - 2021-06-07 - -- #454 - @house-lee - add with_droplet_agent option to create requests - -## [v1.61.0] - 2021-05-12 - -- #452 - @caiofilipini - Add support for DOKS clusters as peers in Firewall rules -- #448 - @andrewsomething - flip: Set omitempty for Region in FloatingIPCreateRequest. 
-- #451 - @andrewsomething - CheckResponse: Add RequestID from header to ErrorResponse when missing from body. -- #450 - @nanzhong - dbaas: handle ca certificates as base64 encoded -- #449 - @nanzhong - dbaas: add support for getting cluster CA -- #446 - @kamaln7 - app spec: update cors policy - -## [v1.60.0] - 2021-04-04 - -- #443 - @andrewsomething - apps: Support pagination. -- #442 - @andrewsomething - dbaas: Support restoring from a backup. -- #441 - @andrewsomething - k8s: Add URN method to KubernetesCluster. - -## [v1.59.0] - 2021-03-29 - -- #439 - @andrewsomething - vpcs: Support listing members of a VPC. -- #438 - @andrewsomething - Add Go 1.16.x to the testing matrix. - -## [v1.58.0] - 2021-02-17 - -- #436 - @MorrisLaw - kubernetes: add name field to associated resources -- #434 - @andrewsomething - sizes: Add description field. -- #433 - @andrewsomething - Deprecate Name field in godo.DropletCreateVolume - -## [v1.57.0] - 2021-01-15 - -- #429 - @varshavaradarajan - kubernetes: support optional cascading deletes for clusters -- #430 - @jonfriesen - apps: updates apps.gen.go for gitlab addition -- #431 - @nicktate - apps: update proto to support dockerhub registry type - -## [v1.56.0] - 2021-01-08 - -- #422 - @kamaln7 - apps: add ProposeApp method - -## [v1.55.0] - 2021-01-07 - -- #425 - @adamwg - registry: Support the storage usage indicator -- #423 - @ChiefMateStarbuck - Updated README example -- #421 - @andrewsomething - Add some basic input cleaning to NewFromToken -- #420 - @bentranter - Don't set "Content-Type" header on GET requests - -## [v1.54.0] - 2020-11-24 - -- #417 - @waynr - registry: add support for garbage collection types - -## [v1.53.0] - 2020-11-20 - -- #414 - @varshavaradarajan - kubernetes: add clusterlint support -- #413 - @andrewsomething - images: Support updating distribution and description. 
- -## [v1.52.0] - 2020-11-05 - -- #411 - @nicktate - apps: add unspecified type to image source registry types -- #409 - @andrewsomething - registry: Add support for updating a subscription. -- #408 - @nicktate - apps: update spec to include image source -- #407 - @kamaln7 - apps: add the option to force build a new deployment - -## [v1.51.0] - 2020-11-02 - -- #405 - @adamwg - registry: Support subscription options -- #398 - @reeseconor - Add support for caching dependencies between GitHub Action runs -- #404 - @andrewsomething - CONTRIBUTING.md: Suggest using github-changelog-generator. - -## [v1.50.0] - 2020-10-26 - -- #400 - @waynr - registry: add garbage collection support -- #402 - @snormore - apps: add catchall_document static site spec field and failed-deploy job type -- #401 - @andrewlouis93 - VPC: adds option to set a VPC as the regional default - -## [v1.49.0] - 2020-10-21 - -- #383 - @kamaln7 - apps: add ListRegions, Get/ListTiers, Get/ListInstanceSizes -- #390 - @snormore - apps: add service spec internal_ports - -## [v1.48.0] - 2020-10-16 - -- #388 - @varshavaradarajan - kubernetes - change docr integration api routes -- #386 - @snormore - apps: pull in recent updates to jobs and domains - -## [v1.47.0] - 2020-10-14 - -- #384 kubernetes - add registry related doks apis - @varshavaradarajan -- #385 Fixed some typo in apps.gen.go and databases.go file - @devil-cyber -- #382 Add GetKubeConfigWithExpiry (#334) - @ivanlemeshev -- #381 Fix golint issues #377 - @sidsbrmnn -- #380 refactor: Cyclomatic complexity issue - @DonRenando -- #379 Run gofmt to fix some issues in codebase - @mycodeself - -## [v1.46.0] - 2020-10-05 - -- #373 load balancers: add LB size field, currently in closed beta - @anitgandhi - -## [v1.45.0] - 2020-09-25 - -**Note**: This release contains breaking changes to App Platform features currently in closed beta. 
- -- #369 update apps types to latest - @kamaln7 -- #368 Kubernetes: add taints field to node pool create and update requests - @timoreimann -- #367 update apps types, address marshaling bug - @kamaln7 - -## [v1.44.0] - 2020-09-08 - -- #364 apps: support aggregate deployment logs - @kamaln7 - -## [v1.43.0] - 2020-09-08 - -- #362 update apps types - @kamaln7 - -## [v1.42.1] - 2020-08-06 - -- #360 domains: Allow for SRV records with port 0. - @andrewsomething - -## [v1.42.0] - 2020-07-22 - -- #357 invoices: add category to InvoiceItem - @rbutler -- #358 apps: add support for following logs - @nanzhong - -## [v1.41.0] - 2020-07-17 - -- #355 kubernetes: Add support for surge upgrades - @varshavaradarajan - -## [v1.40.0] - 2020-07-16 - -- #347 Make Rate limits thread safe - @roidelapluie -- #353 Reuse TCP connection - @itsksaurabh - -## [v1.39.0] - 2020-07-14 - -- #345, #346 Add app platform support [beta] - @nanzhong - -## [v1.38.0] - 2020-06-18 - -- #341 Install 1-click applications on a Kubernetes cluster - @keladhruv -- #340 Add RecordsByType, RecordsByName and RecordsByTypeAndName to the DomainsService - @viola - -## [v1.37.0] - 2020-06-01 - -- #336 registry: URL encode repository names when building URLs. @adamwg -- #335 Add 1-click service and request. @scottcrawford03 - -## [v1.36.0] - 2020-05-12 - -- #331 Expose expiry_seconds for Registry.DockerCredentials. 
@andrewsomething - -## [v1.35.1] - 2020-04-21 - -- #328 Update vulnerable x/crypto dependency - @bentranter - -## [v1.35.0] - 2020-04-20 - -- #326 Add TagCount field to registry/Repository - @nicktate -- #325 Add DOCR EA routes - @nicktate -- #324 Upgrade godo to Go 1.14 - @bentranter - -## [v1.34.0] - 2020-03-30 - -- #320 Add VPC v3 attributes - @viola - -## [v1.33.1] - 2020-03-23 - -- #318 upgrade github.com/stretchr/objx past 0.1.1 - @hilary - -## [v1.33.0] - 2020-03-20 - -- #310 Add BillingHistory service and List endpoint - @rbutler -- #316 load balancers: add new enable_backend_keepalive field - @anitgandhi - -## [v1.32.0] - 2020-03-04 - -- #311 Add reset database user auth method - @zbarahal-do - -## [v1.31.0] - 2020-02-28 - -- #305 invoices: GetPDF and GetCSV methods - @rbutler -- #304 Add NewFromToken convenience method to init client - @bentranter -- #301 invoices: Get, Summary, and List methods - @rbutler -- #299 Fix param expiry_seconds for kubernetes.GetCredentials request - @velp - -## [v1.30.0] - 2020-02-03 - -- #295 registry: support the created_at field - @adamwg -- #293 doks: node pool labels - @snormore - -## [v1.29.0] - 2019-12-13 - -- #288 Add Balance Get method - @rbutler -- #286,#289 Deserialize meta field - @timoreimann - -## [v1.28.0] - 2019-12-04 - -- #282 Add valid Redis eviction policy constants - @bentranter -- #281 Remove databases info from top-level godoc string - @bentranter -- #280 Fix VolumeSnapshotResourceType value volumesnapshot -> volume_snapshot - @aqche - -## [v1.27.0] - 2019-11-18 - -- #278 add mysql user auth settings for database users - @gregmankes - -## [v1.26.0] - 2019-11-13 - -- #272 dbaas: get and set mysql sql mode - @mikejholly - -## [v1.25.0] - 2019-11-13 - -- #275 registry/docker-credentials: add support for the read/write parameter - @kamaln7 -- #273 implement the registry/docker-credentials endpoint - @kamaln7 -- #271 Add registry resource - @snormore - -## [v1.24.1] - 2019-11-04 - -- #264 Update isLast to 
check p.Next - @aqche - -## [v1.24.0] - 2019-10-30 - -- #267 Return []DatabaseFirewallRule in addition to raw response. - @andrewsomething - -## [v1.23.1] - 2019-10-30 - -- #265 add support for getting/setting firewall rules - @gregmankes -- #262 remove ResolveReference call - @mdanzinger -- #261 Update CONTRIBUTING.md - @mdanzinger - -## [v1.22.0] - 2019-09-24 - -- #259 Add Kubernetes GetCredentials method - @snormore - -## [v1.21.1] - 2019-09-19 - -- #257 Upgrade to Go 1.13 - @bentranter - -## [v1.21.0] - 2019-09-16 - -- #255 Add DropletID to Kubernetes Node instance - @snormore -- #254 Add tags to Database, DatabaseReplica - @Zyqsempai - -## [v1.20.0] - 2019-09-06 - -- #252 Add Kubernetes autoscale config fields - @snormore -- #251 Support unset fields on Kubernetes cluster and node pool updates - @snormore -- #250 Add Kubernetes GetUser method - @snormore - -## [v1.19.0] - 2019-07-19 - -- #244 dbaas: add private-network-uuid field to create request - -## [v1.18.0] - 2019-07-17 - -- #241 Databases: support for custom VPC UUID on migrate @mikejholly -- #240 Add the ability to get URN for a Database @stack72 -- #236 Fix omitempty typos in JSON struct tags @amccarthy1 - -## [v1.17.0] - 2019-06-21 - -- #238 Add support for Redis eviction policy in Databases @mikejholly - -## [v1.16.0] - 2019-06-04 - -- #233 Add Kubernetes DeleteNode method, deprecate RecycleNodePoolNodes @bouk - -## [v1.15.0] - 2019-05-13 - -- #231 Add private connection fields to Databases - @mikejholly -- #223 Introduce Go modules - @andreiavrammsd - -## [v1.14.0] - 2019-05-13 - -- #229 Add support for upgrading Kubernetes clusters - @adamwg - -## [v1.13.0] - 2019-04-19 - -- #213 Add tagging support for volume snapshots - @jcodybaker - -## [v1.12.0] - 2019-04-18 - -- #224 Add maintenance window support for Kubernetes- @fatih - -## [v1.11.1] - 2019-04-04 - -- #222 Fix Create Database Pools json fields - @sunny-b - -## [v1.11.0] - 2019-04-03 - -- #220 roll out vpc functionality - @jheimann - -## 
[v1.10.1] - 2019-03-27 - -- #219 Fix Database Pools json field - @sunny-b - -## [v1.10.0] - 2019-03-20 - -- #215 Add support for Databases - @mikejholly - -## [v1.9.0] - 2019-03-18 - -- #214 add support for enable_proxy_protocol. - @mregmi - -## [v1.8.0] - 2019-03-13 - -- #210 Expose tags on storage volume create/list/get. - @jcodybaker - -## [v1.7.5] - 2019-03-04 - -- #207 Add support for custom subdomains for Spaces CDN [beta] - @xornivore - -## [v1.7.4] - 2019-02-08 - -- #202 Allow tagging volumes - @mchitten - -## [v1.7.3] - 2018-12-18 - -- #196 Expose tag support for creating Load Balancers. - -## [v1.7.2] - 2018-12-04 - -- #192 Exposes more options for Kubernetes clusters. - -## [v1.7.1] - 2018-11-27 - -- #190 Expose constants for the state of Kubernetes clusters. - -## [v1.7.0] - 2018-11-13 - -- #188 Kubernetes support [beta] - @aybabtme - -## [v1.6.0] - 2018-10-16 - -- #185 Projects support [beta] - @mchitten - -## [v1.5.0] - 2018-10-01 - -- #181 Adding tagging images support - @hugocorbucci - -## [v1.4.2] - 2018-08-30 - -- #178 Allowing creating domain records with weight of 0 - @TFaga -- #177 Adding `VolumeLimit` to account - @lxfontes - -## [v1.4.1] - 2018-08-23 - -- #176 Fix cdn flush cache API endpoint - @sunny-b - -## [v1.4.0] - 2018-08-22 - -- #175 Add support for Spaces CDN - @sunny-b - -## [v1.3.0] - 2018-05-24 - -- #170 Add support for volume formatting - @adamwg - -## [v1.2.0] - 2018-05-08 - -- #166 Remove support for Go 1.6 - @iheanyi -- #165 Add support for Let's Encrypt Certificates - @viola - -## [v1.1.3] - 2018-03-07 - -- #156 Handle non-json errors from the API - @aknuds1 -- #158 Update droplet example to use latest instance type - @dan-v - -## [v1.1.2] - 2018-03-06 - -- #157 storage: list volumes should handle only name or only region params - @andrewsykim -- #154 docs: replace first example with fully-runnable example - @xmudrii -- #152 Handle flags & tag properties of domain record - @jaymecd - -## [v1.1.1] - 2017-09-29 - -- #151 
Following user agent field recommendations - @joonas -- #148 AsRequest method to create load balancers requests - @lukegb - -## [v1.1.0] - 2017-06-06 - -### Added - -- #145 Add FirewallsService for managing Firewalls with the DigitalOcean API. - @viola -- #139 Add TTL field to the Domains. - @xmudrii - -### Fixed - -- #143 Fix oauth2.NoContext depreciation. - @jbowens -- #141 Fix DropletActions on tagged resources. - @xmudrii - -## [v1.0.0] - 2017-03-10 - -### Added - -- #130 Add Convert to ImageActionsService. - @xmudrii -- #126 Add CertificatesService for managing certificates with the DigitalOcean API. - @viola -- #125 Add LoadBalancersService for managing load balancers with the DigitalOcean API. - @viola -- #122 Add GetVolumeByName to StorageService. - @protochron -- #113 Add context.Context to all calls. - @aybabtme diff --git a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md deleted file mode 100644 index 388a5bd..0000000 --- a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -We love contributions! You are welcome to open a pull request, but it's a good idea to -open an issue and discuss your idea with us first. - -Once you are ready to open a PR, please keep the following guidelines in mind: - -1. Code should be `go fmt` compliant. -1. Types, structs and funcs should be documented. -1. Tests pass. - -## Getting set up - -`godo` uses go modules. Just fork this repo, clone your fork and off you go! - -## Running tests - -When working on code in this repository, tests can be run via: - -```sh -go test -mod=vendor . -``` - -## Versioning - -Godo follows [semver](https://www.semver.org) versioning semantics. -New functionality should be accompanied by increment to the minor -version number. Any code merged to main is subject to release. - -## Releasing - -Releasing a new version of godo is currently a manual process. 
- -Submit a separate pull request for the version change from the pull -request with your changes. - -1. Update the `CHANGELOG.md` with your changes. If a version header - for the next (unreleased) version does not exist, create one. - Include one bullet point for each piece of new functionality in the - release, including the pull request ID, description, and author(s). - For example: - -``` -## [v1.8.0] - 2019-03-13 - -- #210 - @jcodybaker - Expose tags on storage volume create/list/get. -- #123 - @digitalocean - Update test dependencies -``` - - To generate a list of changes since the previous release in the correct - format, you can use [github-changelog-generator](https://github.com/digitalocean/github-changelog-generator). - It can be installed from source by running: - -``` -go get -u github.com/digitalocean/github-changelog-generator -``` - - Next, list the changes by running: - -``` -github-changelog-generator -org digitalocean -repo godo -``` - -2. Update the `libraryVersion` number in `godo.go`. -3. Make a pull request with these changes. This PR should be separate from the PR containing the godo changes. -4. Once the pull request has been merged, [draft a new release](https://github.com/digitalocean/godo/releases/new). -5. Update the `Tag version` and `Release title` field with the new godo version. Be sure the version has a `v` prefixed in both places. Ex `v1.8.0`. -6. Copy the changelog bullet points to the description field. -7. Publish the release. - -## Go Version Support - -This project follows the support [policy of Go](https://go.dev/doc/devel/release#policy) -as its support policy. The two latest major releases of Go are supported by the project. -[CI workflows](.github/workflows/ci.yml) should test against both supported versions. -[go.mod](./go.mod) should specify the oldest of the supported versions to give -downstream users of godo flexibility. 
diff --git a/vendor/github.com/digitalocean/godo/LICENSE.txt b/vendor/github.com/digitalocean/godo/LICENSE.txt deleted file mode 100644 index 43c5d2e..0000000 --- a/vendor/github.com/digitalocean/godo/LICENSE.txt +++ /dev/null @@ -1,55 +0,0 @@ -Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -====================== -Portions of the client are based on code at: -https://github.com/google/go-github/ - -Copyright (c) 2013 The go-github AUTHORS. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/digitalocean/godo/README.md b/vendor/github.com/digitalocean/godo/README.md deleted file mode 100644 index fd3cdbd..0000000 --- a/vendor/github.com/digitalocean/godo/README.md +++ /dev/null @@ -1,199 +0,0 @@ -# Godo - -[![GitHub Actions CI](https://github.com/digitalocean/godo/actions/workflows/ci.yml/badge.svg)](https://github.com/digitalocean/godo/actions/workflows/ci.yml) -[![GoDoc](https://godoc.org/github.com/digitalocean/godo?status.svg)](https://godoc.org/github.com/digitalocean/godo) - -Godo is a Go client library for accessing the DigitalOcean V2 API. 
- -You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo) - -You can view DigitalOcean API docs here: [https://docs.digitalocean.com/reference/api/api-reference/](https://docs.digitalocean.com/reference/api/api-reference/) - -## Install -```sh -go get github.com/digitalocean/godo@vX.Y.Z -``` - -where X.Y.Z is the [version](https://github.com/digitalocean/godo/releases) you need. - -or -```sh -go get github.com/digitalocean/godo -``` -for non Go modules usage or latest version. - -## Usage - -```go -import "github.com/digitalocean/godo" -``` - -Create a new DigitalOcean client, then use the exposed services to -access different parts of the DigitalOcean API. - -### Authentication - -Currently, Personal Access Token (PAT) is the only method of -authenticating with the API. You can manage your tokens -at the DigitalOcean Control Panel [Applications Page](https://cloud.digitalocean.com/settings/applications). - -You can then use your token to create a new client: - -```go -package main - -import ( - "github.com/digitalocean/godo" -) - -func main() { - client := godo.NewFromToken("my-digitalocean-api-token") -} -``` - -If you need to provide a `context.Context` to your new client, you should use [`godo.NewClient`](https://godoc.org/github.com/digitalocean/godo#NewClient) to manually construct a client instead. - -## Examples - - -To create a new Droplet: - -```go -dropletName := "super-cool-droplet" - -createRequest := &godo.DropletCreateRequest{ - Name: dropletName, - Region: "nyc3", - Size: "s-1vcpu-1gb", - Image: godo.DropletCreateImage{ - Slug: "ubuntu-20-04-x64", - }, -} - -ctx := context.TODO() - -newDroplet, _, err := client.Droplets.Create(ctx, createRequest) - -if err != nil { - fmt.Printf("Something bad happened: %s\n\n", err) - return err -} -``` - -### Pagination - -If a list of items is paginated by the API, you must request pages individually. 
For example, to fetch all Droplets: - -```go -func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, error) { - // create a list to hold our droplets - list := []godo.Droplet{} - - // create options. initially, these will be blank - opt := &godo.ListOptions{} - for { - droplets, resp, err := client.Droplets.List(ctx, opt) - if err != nil { - return nil, err - } - - // append the current page's droplets to our list - list = append(list, droplets...) - - // if we are at the last page, break out the for loop - if resp.Links == nil || resp.Links.IsLastPage() { - break - } - - page, err := resp.Links.CurrentPage() - if err != nil { - return nil, err - } - - // set the page we want for the next request - opt.Page = page + 1 - } - - return list, nil -} -``` - -Some endpoints offer token based pagination. For example, to fetch all Registry Repositories: - -```go -func ListRepositoriesV2(ctx context.Context, client *godo.Client, registryName string) ([]*godo.RepositoryV2, error) { - // create a list to hold our registries - list := []*godo.RepositoryV2{} - - // create options. initially, these will be blank - opt := &godo.TokenListOptions{} - for { - repositories, resp, err := client.Registry.ListRepositoriesV2(ctx, registryName, opt) - if err != nil { - return nil, err - } - - // append the current page's registries to our list - list = append(list, repositories...) 
- - // if we are at the last page, break out the for loop - if resp.Links == nil || resp.Links.IsLastPage() { - break - } - - // grab the next page token - nextPageToken, err := resp.Links.NextPageToken() - if err != nil { - return nil, err - } - - // provide the next page token for the next request - opt.Token = nextPageToken - } - - return list, nil -} -``` - -### Automatic Retries and Exponential Backoff - -The Godo client can be configured to use automatic retries and exponentional backoff for requests that fail with 429 or 500-level response codes via [go-retryablehttp](https://github.com/hashicorp/go-retryablehttp). To configure Godo to enable usage of go-retryablehttp, the `RetryConfig.RetryMax` must be set. - -```go -tokenSrc := oauth2.StaticTokenSource(&oauth2.Token{ - AccessToken: "dop_v1_xxxxxx", -}) - -oauth_client := oauth2.NewClient(oauth2.NoContext, tokenSrc) - -waitMax := godo.PtrTo(6.0) -waitMin := godo.PtrTo(3.0) - -retryConfig := godo.RetryConfig{ - RetryMax: 3, - RetryWaitMin: waitMin, - RetryWaitMax: waitMax, -} - -client, err := godo.New(oauth_client, godo.WithRetryAndBackoffs(retryConfig)) -``` - -Please refer to the [RetryConfig Godo documentation](https://pkg.go.dev/github.com/digitalocean/godo#RetryConfig) for more information. - -## Versioning - -Each version of the client is tagged and the version is updated accordingly. - -To see the list of past versions, run `git tag`. - - -## Documentation - -For a comprehensive list of examples, check out the [API documentation](https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys). - -For details on all the functionality in this library, see the [GoDoc](http://godoc.org/github.com/digitalocean/godo) documentation. - - -## Contributing - -We love pull requests! Please see the [contribution guidelines](CONTRIBUTING.md). 
diff --git a/vendor/github.com/digitalocean/godo/account.go b/vendor/github.com/digitalocean/godo/account.go deleted file mode 100644 index 7f61900..0000000 --- a/vendor/github.com/digitalocean/godo/account.go +++ /dev/null @@ -1,69 +0,0 @@ -package godo - -import ( - "context" - "net/http" -) - -// AccountService is an interface for interfacing with the Account -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Account -type AccountService interface { - Get(context.Context) (*Account, *Response, error) -} - -// AccountServiceOp handles communication with the Account related methods of -// the DigitalOcean API. -type AccountServiceOp struct { - client *Client -} - -var _ AccountService = &AccountServiceOp{} - -// Account represents a DigitalOcean Account -type Account struct { - DropletLimit int `json:"droplet_limit,omitempty"` - FloatingIPLimit int `json:"floating_ip_limit,omitempty"` - ReservedIPLimit int `json:"reserved_ip_limit,omitempty"` - VolumeLimit int `json:"volume_limit,omitempty"` - Email string `json:"email,omitempty"` - Name string `json:"name,omitempty"` - UUID string `json:"uuid,omitempty"` - EmailVerified bool `json:"email_verified,omitempty"` - Status string `json:"status,omitempty"` - StatusMessage string `json:"status_message,omitempty"` - Team *TeamInfo `json:"team,omitempty"` -} - -// TeamInfo contains information about the currently team context. 
-type TeamInfo struct { - Name string `json:"name,omitempty"` - UUID string `json:"uuid,omitempty"` -} - -type accountRoot struct { - Account *Account `json:"account"` -} - -func (r Account) String() string { - return Stringify(r) -} - -// Get DigitalOcean account info -func (s *AccountServiceOp) Get(ctx context.Context) (*Account, *Response, error) { - - path := "v2/account" - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(accountRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Account, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/action.go b/vendor/github.com/digitalocean/godo/action.go deleted file mode 100644 index 07ee914..0000000 --- a/vendor/github.com/digitalocean/godo/action.go +++ /dev/null @@ -1,108 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const ( - actionsBasePath = "v2/actions" - - // ActionInProgress is an in progress action status - ActionInProgress = "in-progress" - - //ActionCompleted is a completed action status - ActionCompleted = "completed" -) - -// ActionsService handles communication with action related methods of the -// DigitalOcean API: https://docs.digitalocean.com/reference/api/api-reference/#tag/Actions -type ActionsService interface { - List(context.Context, *ListOptions) ([]Action, *Response, error) - Get(context.Context, int) (*Action, *Response, error) -} - -// ActionsServiceOp handles communication with the image action related methods of the -// DigitalOcean API. 
-type ActionsServiceOp struct { - client *Client -} - -var _ ActionsService = &ActionsServiceOp{} - -type actionsRoot struct { - Actions []Action `json:"actions"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type actionRoot struct { - Event *Action `json:"action"` -} - -// Action represents a DigitalOcean Action -type Action struct { - ID int `json:"id"` - Status string `json:"status"` - Type string `json:"type"` - StartedAt *Timestamp `json:"started_at"` - CompletedAt *Timestamp `json:"completed_at"` - ResourceID int `json:"resource_id"` - ResourceType string `json:"resource_type"` - Region *Region `json:"region,omitempty"` - RegionSlug string `json:"region_slug,omitempty"` -} - -// List all actions -func (s *ActionsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Action, *Response, error) { - path := actionsBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Actions, resp, err -} - -// Get an action by ID. 
-func (s *ActionsServiceOp) Get(ctx context.Context, id int) (*Action, *Response, error) { - if id < 1 { - return nil, nil, NewArgError("id", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", actionsBasePath, id) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (a Action) String() string { - return Stringify(a) -} diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go deleted file mode 100644 index 2297054..0000000 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ /dev/null @@ -1,1309 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -// $ bundle -pkg godo -prefix ./dev/dist/godo - -package godo - -import ( - "time" -) - -// AppAlert Represents an alert configured for an app or component. -type AppAlert struct { - // The ID of the alert. This will be auto-generated by App Platform once the spec is submitted. - ID string `json:"id,omitempty"` - // Name of the component this alert applies to. - ComponentName string `json:"component_name,omitempty"` - Spec *AppAlertSpec `json:"spec,omitempty"` - // Email destinations for the alert when triggered. - Emails []string `json:"emails,omitempty"` - // Slack webhook destinations for the alert when triggered. 
- SlackWebhooks []*AppAlertSlackWebhook `json:"slack_webhooks,omitempty"` - Phase AppAlertPhase `json:"phase,omitempty"` - Progress *AppAlertProgress `json:"progress,omitempty"` -} - -// AppAlertPhase the model 'AppAlertPhase' -type AppAlertPhase string - -// List of AppAlertPhase -const ( - AppAlertPhase_Unknown AppAlertPhase = "UNKNOWN" - AppAlertPhase_Pending AppAlertPhase = "PENDING" - AppAlertPhase_Configuring AppAlertPhase = "CONFIGURING" - AppAlertPhase_Active AppAlertPhase = "ACTIVE" - AppAlertPhase_Error AppAlertPhase = "ERROR" -) - -// AppAlertProgress struct for AppAlertProgress -type AppAlertProgress struct { - Steps []*AppAlertProgressStep `json:"steps,omitempty"` -} - -// AppAlertProgressStep struct for AppAlertProgressStep -type AppAlertProgressStep struct { - Name string `json:"name,omitempty"` - Status AppAlertProgressStepStatus `json:"status,omitempty"` - Steps []*AppAlertProgressStep `json:"steps,omitempty"` - StartedAt time.Time `json:"started_at,omitempty"` - EndedAt time.Time `json:"ended_at,omitempty"` - Reason *AppAlertProgressStepReason `json:"reason,omitempty"` -} - -// AppAlertProgressStepReason struct for AppAlertProgressStepReason -type AppAlertProgressStepReason struct { - Code string `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -// AppAlertProgressStepStatus the model 'AppAlertProgressStepStatus' -type AppAlertProgressStepStatus string - -// List of AppAlertProgressStepStatus -const ( - AppAlertProgressStepStatus_Unknown AppAlertProgressStepStatus = "UNKNOWN" - AppAlertProgressStepStatus_Pending AppAlertProgressStepStatus = "PENDING" - AppAlertProgressStepStatus_Running AppAlertProgressStepStatus = "RUNNING" - AppAlertProgressStepStatus_Error AppAlertProgressStepStatus = "ERROR" - AppAlertProgressStepStatus_Success AppAlertProgressStepStatus = "SUCCESS" -) - -// AppAlertSlackWebhook Configuration of a Slack alerting destination. -type AppAlertSlackWebhook struct { - // URL for the Slack webhook. 
- URL string `json:"url,omitempty"` - // Name of the Slack channel. - Channel string `json:"channel,omitempty"` -} - -// App An application's configuration and status. -type App struct { - ID string `json:"id,omitempty"` - OwnerUUID string `json:"owner_uuid,omitempty"` - Spec *AppSpec `json:"spec"` - LastDeploymentActiveAt time.Time `json:"last_deployment_active_at,omitempty"` - DefaultIngress string `json:"default_ingress,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - ActiveDeployment *Deployment `json:"active_deployment,omitempty"` - InProgressDeployment *Deployment `json:"in_progress_deployment,omitempty"` - PendingDeployment *Deployment `json:"pending_deployment,omitempty"` - LastDeploymentCreatedAt time.Time `json:"last_deployment_created_at,omitempty"` - LiveURL string `json:"live_url,omitempty"` - Region *AppRegion `json:"region,omitempty"` - TierSlug string `json:"tier_slug,omitempty"` - LiveURLBase string `json:"live_url_base,omitempty"` - LiveDomain string `json:"live_domain,omitempty"` - Domains []*AppDomain `json:"domains,omitempty"` - PinnedDeployment *Deployment `json:"pinned_deployment,omitempty"` - BuildConfig *AppBuildConfig `json:"build_config,omitempty"` - // The id of the project for the app. This will be empty if there is a fleet (project) lookup failure. - ProjectID string `json:"project_id,omitempty"` - // The dedicated egress ip addresses associated with the app. - DedicatedIps []*AppDedicatedIp `json:"dedicated_ips,omitempty"` -} - -// AppAlertSpec Configuration of an alert for the app or a individual component. -type AppAlertSpec struct { - Rule AppAlertSpecRule `json:"rule,omitempty"` - // Determines whether or not the alert is disabled. - Disabled bool `json:"disabled,omitempty"` - Operator AppAlertSpecOperator `json:"operator,omitempty"` - // The meaning is dependent upon the rule. 
It is used in conjunction with the operator and window to determine when an alert should trigger. - Value float32 `json:"value,omitempty"` - Window AppAlertSpecWindow `json:"window,omitempty"` -} - -// AppAlertSpecOperator the model 'AppAlertSpecOperator' -type AppAlertSpecOperator string - -// List of AppAlertSpecOperator -const ( - AppAlertSpecOperator_UnspecifiedOperator AppAlertSpecOperator = "UNSPECIFIED_OPERATOR" - AppAlertSpecOperator_GreaterThan AppAlertSpecOperator = "GREATER_THAN" - AppAlertSpecOperator_LessThan AppAlertSpecOperator = "LESS_THAN" -) - -// AppAlertSpecRule - CPU_UTILIZATION: Represents CPU for a given container instance. Only applicable at the component level. - MEM_UTILIZATION: Represents RAM for a given container instance. Only applicable at the component level. - RESTART_COUNT: Represents restart count for a given container instance. Only applicable at the component level. - DEPLOYMENT_FAILED: Represents whether a deployment has failed. Only applicable at the app level. - DEPLOYMENT_LIVE: Represents whether a deployment has succeeded. Only applicable at the app level. - DEPLOYMENT_STARTED: Represents whether a deployment has started. Only applicable at the app level. - DEPLOYMENT_CANCELED: Represents whether a deployment has been canceled. Only applicable at the app level. - DOMAIN_FAILED: Represents whether a domain configuration has failed. Only applicable at the app level. - DOMAIN_LIVE: Represents whether a domain configuration has succeeded. Only applicable at the app level. - FUNCTIONS_ACTIVATION_COUNT: Represents an activation count for a given functions instance. Only applicable to functions components. - FUNCTIONS_AVERAGE_DURATION_MS: Represents the average duration for function runtimes. Only applicable to functions components. - FUNCTIONS_ERROR_RATE_PER_MINUTE: Represents an error rate per minute for a given functions instance. Only applicable to functions components. 
- FUNCTIONS_AVERAGE_WAIT_TIME_MS: Represents the average wait time for functions. Only applicable to functions components. - FUNCTIONS_ERROR_COUNT: Represents an error count for a given functions instance. Only applicable to functions components. - FUNCTIONS_GB_RATE_PER_SECOND: Represents the rate of memory consumption (GB x seconds) for functions. Only applicable to functions components. -type AppAlertSpecRule string - -// List of AppAlertSpecRule -const ( - AppAlertSpecRule_UnspecifiedRule AppAlertSpecRule = "UNSPECIFIED_RULE" - AppAlertSpecRule_CPUUtilization AppAlertSpecRule = "CPU_UTILIZATION" - AppAlertSpecRule_MemUtilization AppAlertSpecRule = "MEM_UTILIZATION" - AppAlertSpecRule_RestartCount AppAlertSpecRule = "RESTART_COUNT" - AppAlertSpecRule_DeploymentFailed AppAlertSpecRule = "DEPLOYMENT_FAILED" - AppAlertSpecRule_DeploymentLive AppAlertSpecRule = "DEPLOYMENT_LIVE" - AppAlertSpecRule_DeploymentStarted AppAlertSpecRule = "DEPLOYMENT_STARTED" - AppAlertSpecRule_DeploymentCanceled AppAlertSpecRule = "DEPLOYMENT_CANCELED" - AppAlertSpecRule_DomainFailed AppAlertSpecRule = "DOMAIN_FAILED" - AppAlertSpecRule_DomainLive AppAlertSpecRule = "DOMAIN_LIVE" - AppAlertSpecRule_FunctionsActivationCount AppAlertSpecRule = "FUNCTIONS_ACTIVATION_COUNT" - AppAlertSpecRule_FunctionsAverageDurationMS AppAlertSpecRule = "FUNCTIONS_AVERAGE_DURATION_MS" - AppAlertSpecRule_FunctionsErrorRatePerMinute AppAlertSpecRule = "FUNCTIONS_ERROR_RATE_PER_MINUTE" - AppAlertSpecRule_FunctionsAverageWaitTimeMs AppAlertSpecRule = "FUNCTIONS_AVERAGE_WAIT_TIME_MS" - AppAlertSpecRule_FunctionsErrorCount AppAlertSpecRule = "FUNCTIONS_ERROR_COUNT" - AppAlertSpecRule_FunctionsGBRatePerSecond AppAlertSpecRule = "FUNCTIONS_GB_RATE_PER_SECOND" -) - -// AppAlertSpecWindow the model 'AppAlertSpecWindow' -type AppAlertSpecWindow string - -// List of AppAlertSpecWindow -const ( - AppAlertSpecWindow_UnspecifiedWindow AppAlertSpecWindow = "UNSPECIFIED_WINDOW" - AppAlertSpecWindow_FiveMinutes 
AppAlertSpecWindow = "FIVE_MINUTES" - AppAlertSpecWindow_TenMinutes AppAlertSpecWindow = "TEN_MINUTES" - AppAlertSpecWindow_ThirtyMinutes AppAlertSpecWindow = "THIRTY_MINUTES" - AppAlertSpecWindow_OneHour AppAlertSpecWindow = "ONE_HOUR" -) - -// AppAutoscalingSpec struct for AppAutoscalingSpec -type AppAutoscalingSpec struct { - // The minimum amount of instances for this component. - MinInstanceCount int64 `json:"min_instance_count,omitempty"` - // The maximum amount of instances for this component. - MaxInstanceCount int64 `json:"max_instance_count,omitempty"` - Metrics *AppAutoscalingSpecMetrics `json:"metrics,omitempty"` -} - -// AppAutoscalingSpecMetricCPU struct for AppAutoscalingSpecMetricCPU -type AppAutoscalingSpecMetricCPU struct { - // The average target CPU utilization for the component. - Percent int64 `json:"percent,omitempty"` -} - -// AppAutoscalingSpecMetrics struct for AppAutoscalingSpecMetrics -type AppAutoscalingSpecMetrics struct { - CPU *AppAutoscalingSpecMetricCPU `json:"cpu,omitempty"` -} - -// AppBuildConfig struct for AppBuildConfig -type AppBuildConfig struct { - CNBVersioning *AppBuildConfigCNBVersioning `json:"cnb_versioning,omitempty"` -} - -// AppBuildConfigCNBVersioning struct for AppBuildConfigCNBVersioning -type AppBuildConfigCNBVersioning struct { - // List of versioned buildpacks used for the application. Buildpacks are only versioned based on the major semver version, therefore exact versions will not be available at the app build config. - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` - // A version id that represents the underlying CNB stack. The version of the stack indicates what buildpacks are supported. - StackID string `json:"stack_id,omitempty"` -} - -// AppDatabaseSpec struct for AppDatabaseSpec -type AppDatabaseSpec struct { - // The database's name. The name must be unique across all components within the same app and cannot use capital letters. 
- Name string `json:"name"` - Engine AppDatabaseSpecEngine `json:"engine,omitempty"` - Version string `json:"version,omitempty"` - // Deprecated. - Size string `json:"size,omitempty"` - // Deprecated. - NumNodes int64 `json:"num_nodes,omitempty"` - // Whether this is a production or dev database. - Production bool `json:"production,omitempty"` - // The name of the underlying DigitalOcean DBaaS cluster. This is required for production databases. For dev databases, if cluster_name is not set, a new cluster will be provisioned. - ClusterName string `json:"cluster_name,omitempty"` - // The name of the MySQL or PostgreSQL database to configure. - DBName string `json:"db_name,omitempty"` - // The name of the MySQL or PostgreSQL user to configure. - DBUser string `json:"db_user,omitempty"` -} - -// AppDatabaseSpecEngine the model 'AppDatabaseSpecEngine' -type AppDatabaseSpecEngine string - -// List of AppDatabaseSpecEngine -const ( - AppDatabaseSpecEngine_Unset AppDatabaseSpecEngine = "UNSET" - AppDatabaseSpecEngine_MySQL AppDatabaseSpecEngine = "MYSQL" - AppDatabaseSpecEngine_PG AppDatabaseSpecEngine = "PG" - AppDatabaseSpecEngine_Redis AppDatabaseSpecEngine = "REDIS" - AppDatabaseSpecEngine_MongoDB AppDatabaseSpecEngine = "MONGODB" - AppDatabaseSpecEngine_Kafka AppDatabaseSpecEngine = "KAFKA" - AppDatabaseSpecEngine_Opensearch AppDatabaseSpecEngine = "OPENSEARCH" -) - -// AppDedicatedIp Represents a dedicated egress ip. -type AppDedicatedIp struct { - // The ip address of the dedicated egress ip. - Ip string `json:"ip,omitempty"` - // The id of the dedictated egress ip. 
- ID string `json:"id,omitempty"` - Status AppDedicatedIpStatus `json:"status,omitempty"` -} - -// AppDedicatedIpStatus the model 'AppDedicatedIpStatus' -type AppDedicatedIpStatus string - -// List of AppDedicatedIPStatus -const ( - APPDEDICATEDIPSTATUS_Unknown AppDedicatedIpStatus = "UNKNOWN" - APPDEDICATEDIPSTATUS_Assigning AppDedicatedIpStatus = "ASSIGNING" - APPDEDICATEDIPSTATUS_Assigned AppDedicatedIpStatus = "ASSIGNED" - APPDEDICATEDIPSTATUS_Removed AppDedicatedIpStatus = "REMOVED" -) - -// AppDomainSpec struct for AppDomainSpec -type AppDomainSpec struct { - Domain string `json:"domain"` - Type AppDomainSpecType `json:"type,omitempty"` - Wildcard bool `json:"wildcard,omitempty"` - // Optional. If the domain uses DigitalOcean DNS and you would like App Platform to automatically manage it for you, set this to the name of the domain on your account. For example, If the domain you are adding is `app.domain.com`, the zone could be `domain.com`. - Zone string `json:"zone,omitempty"` - Certificate string `json:"certificate,omitempty"` - // Optional. The minimum version of TLS a client application can use to access resources for the domain. Must be one of the following values wrapped within quotations: `\"1.2\"` or `\"1.3\"`. - MinimumTLSVersion string `json:"minimum_tls_version,omitempty"` -} - -// AppDomainSpecType the model 'AppDomainSpecType' -type AppDomainSpecType string - -// List of AppDomainSpecType -const ( - AppDomainSpecType_Unspecified AppDomainSpecType = "UNSPECIFIED" - AppDomainSpecType_Default AppDomainSpecType = "DEFAULT" - AppDomainSpecType_Primary AppDomainSpecType = "PRIMARY" - AppDomainSpecType_Alias AppDomainSpecType = "ALIAS" -) - -// AppEgressSpec Specification for app egress configurations. 
-type AppEgressSpec struct { - Type AppEgressSpecType `json:"type,omitempty"` -} - -// AppEgressSpecType the model 'AppEgressSpecType' -type AppEgressSpecType string - -// List of AppEgressSpecType -const ( - APPEGRESSSPECTYPE_Autoassign AppEgressSpecType = "AUTOASSIGN" - APPEGRESSSPECTYPE_DedicatedIp AppEgressSpecType = "DEDICATED_IP" -) - -// AppFunctionsSpec struct for AppFunctionsSpec -type AppFunctionsSpec struct { - // The name. Must be unique across all components within the same app. - Name string `json:"name"` - Git *GitSourceSpec `json:"git,omitempty"` - GitHub *GitHubSourceSpec `json:"github,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - // An optional path to the working directory to use for the build. Must be relative to the root of the repo. - SourceDir string `json:"source_dir,omitempty"` - // A list of environment variables made available to the component. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - // (Deprecated) A list of HTTP routes that should be routed to this component. - Routes []*AppRouteSpec `json:"routes,omitempty"` - // A list of configured alerts the user has enabled. - Alerts []*AppAlertSpec `json:"alerts,omitempty"` - // A list of configured log forwarding destinations. - LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` - CORS *AppCORSPolicy `json:"cors,omitempty"` -} - -// AppIngressSpec Specification for app ingress configurations. -type AppIngressSpec struct { - LoadBalancer AppIngressSpecLoadBalancer `json:"load_balancer,omitempty"` - LoadBalancerSize int64 `json:"load_balancer_size,omitempty"` - // Rules for configuring HTTP ingress for component routes, CORS, rewrites, and redirects. 
- Rules []*AppIngressSpecRule `json:"rules,omitempty"` -} - -// AppIngressSpecLoadBalancer the model 'AppIngressSpecLoadBalancer' -type AppIngressSpecLoadBalancer string - -// List of AppIngressSpecLoadBalancer -const ( - AppIngressSpecLoadBalancer_Unknown AppIngressSpecLoadBalancer = "UNKNOWN" - AppIngressSpecLoadBalancer_DigitalOcean AppIngressSpecLoadBalancer = "DIGITALOCEAN" -) - -// AppIngressSpecRule A rule that configures component routes, rewrites, redirects and cors. -type AppIngressSpecRule struct { - Match *AppIngressSpecRuleMatch `json:"match,omitempty"` - Component *AppIngressSpecRuleRoutingComponent `json:"component,omitempty"` - Redirect *AppIngressSpecRuleRoutingRedirect `json:"redirect,omitempty"` - CORS *AppCORSPolicy `json:"cors,omitempty"` -} - -// AppIngressSpecRuleMatch The match configuration for a rule. -type AppIngressSpecRuleMatch struct { - Path *AppIngressSpecRuleStringMatch `json:"path,omitempty"` -} - -// AppIngressSpecRuleRoutingComponent The component routing configuration. -type AppIngressSpecRuleRoutingComponent struct { - // The name of the component to route to. - Name string `json:"name,omitempty"` - // An optional flag to preserve the path that is forwarded to the backend service. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If this value is `true`, the path will remain `/api/list`. Note: this is not applicable for Functions Components and is mutually exclusive with `rewrite`. - PreservePathPrefix bool `json:"preserve_path_prefix,omitempty"` - // An optional field that will rewrite the path of the component to be what is specified here. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. 
If you specified the rewrite to be `/v1/`, requests to `/api/list` would be rewritten to `/v1/list`. Note: this is mutually exclusive with `preserve_path_prefix`. - Rewrite string `json:"rewrite,omitempty"` -} - -// AppIngressSpecRuleRoutingRedirect The redirect routing configuration. -type AppIngressSpecRuleRoutingRedirect struct { - // An optional URI path to redirect to. Note: if this is specified the whole URI of the original request will be overwritten to this value, irrespective of the original request URI being matched. - Uri string `json:"uri,omitempty"` - // The authority/host to redirect to. This can be a hostname or IP address. Note: use `port` to set the port. - Authority string `json:"authority,omitempty"` - // The port to redirect to. - Port int64 `json:"port,omitempty"` - // The scheme to redirect to. Supported values are `http` or `https`. Default: `https`. - Scheme string `json:"scheme,omitempty"` - // The redirect code to use. Defaults to `302`. Supported values are 300, 301, 302, 303, 304, 307, 308. - RedirectCode int64 `json:"redirect_code,omitempty"` -} - -// AppIngressSpecRuleStringMatch The string match configuration. -type AppIngressSpecRuleStringMatch struct { - // Prefix-based match. For example, `/api` will match `/api`, `/api/`, and any nested paths such as `/api/v1/endpoint`. - Prefix string `json:"prefix,omitempty"` -} - -// AppJobSpec struct for AppJobSpec -type AppJobSpec struct { - // The name. Must be unique across all components within the same app. - Name string `json:"name"` - Git *GitSourceSpec `json:"git,omitempty"` - GitHub *GitHubSourceSpec `json:"github,omitempty"` - Image *ImageSourceSpec `json:"image,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - // The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks. 
- DockerfilePath string `json:"dockerfile_path,omitempty"` - // An optional build command to run while building this component from source. - BuildCommand string `json:"build_command,omitempty"` - // An optional run command to override the component's default. - RunCommand string `json:"run_command,omitempty"` - // An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo. - SourceDir string `json:"source_dir,omitempty"` - // An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/). - EnvironmentSlug string `json:"environment_slug,omitempty"` - // A list of environment variables made available to the component. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - // The instance size to use for this component. - InstanceSizeSlug string `json:"instance_size_slug,omitempty"` - InstanceCount int64 `json:"instance_count,omitempty"` - Kind AppJobSpecKind `json:"kind,omitempty"` - // A list of configured alerts which apply to the component. - Alerts []*AppAlertSpec `json:"alerts,omitempty"` - // A list of configured log forwarding destinations. - LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` - Termination *AppJobSpecTermination `json:"termination,omitempty"` -} - -// AppJobSpecKind - UNSPECIFIED: Default job type, will auto-complete to POST_DEPLOY kind. - PRE_DEPLOY: Indicates a job that runs before an app deployment. - POST_DEPLOY: Indicates a job that runs after an app deployment. - FAILED_DEPLOY: Indicates a job that runs after a component fails to deploy. 
-type AppJobSpecKind string - -// List of AppJobSpecKind -const ( - AppJobSpecKind_Unspecified AppJobSpecKind = "UNSPECIFIED" - AppJobSpecKind_PreDeploy AppJobSpecKind = "PRE_DEPLOY" - AppJobSpecKind_PostDeploy AppJobSpecKind = "POST_DEPLOY" - AppJobSpecKind_FailedDeploy AppJobSpecKind = "FAILED_DEPLOY" -) - -// AppJobSpecTermination struct for AppJobSpecTermination -type AppJobSpecTermination struct { - // The number of seconds to wait between sending a TERM signal to a container and issuing a KILL which causes immediate shutdown. Default: 120, Minimum 1, Maximum 600. - GracePeriodSeconds int32 `json:"grace_period_seconds,omitempty"` -} - -// AppLogDestinationSpec struct for AppLogDestinationSpec -type AppLogDestinationSpec struct { - // Name of the log destination. - Name string `json:"name"` - Papertrail *AppLogDestinationSpecPapertrail `json:"papertrail,omitempty"` - Datadog *AppLogDestinationSpecDataDog `json:"datadog,omitempty"` - Logtail *AppLogDestinationSpecLogtail `json:"logtail,omitempty"` - OpenSearch *AppLogDestinationSpecOpenSearch `json:"open_search,omitempty"` - Endpoint string `json:"endpoint,omitempty"` - TLSInsecure bool `json:"tls_insecure,omitempty"` - Headers []*AppLogDestinationSpecHeader `json:"headers,omitempty"` -} - -// AppLogDestinationSpecDataDog DataDog configuration. -type AppLogDestinationSpecDataDog struct { - // Datadog HTTP log intake endpoint. - Endpoint string `json:"endpoint,omitempty"` - // Datadog API key. - ApiKey string `json:"api_key"` -} - -// AppLogDestinationSpecHeader struct for AppLogDestinationSpecHeader -type AppLogDestinationSpecHeader struct { - // The name - Key string `json:"key"` - // The header value. - Value string `json:"value,omitempty"` -} - -// AppLogDestinationSpecLogtail Logtail configuration. -type AppLogDestinationSpecLogtail struct { - // Logtail token. - Token string `json:"token"` -} - -// AppLogDestinationSpecOpenSearch OpenSearch configuration. 
-type AppLogDestinationSpecOpenSearch struct { - // OpenSearch API Endpoint. Only HTTPS is supported. Format: https://:. Cannot be specified if `cluster_name` is also specified. - Endpoint string `json:"endpoint,omitempty"` - BasicAuth *OpenSearchBasicAuth `json:"basic_auth,omitempty"` - // The index name to use for the logs. If not set, the default index name is \"logs\". - IndexName string `json:"index_name,omitempty"` - // The name of a DigitalOcean DBaaS OpenSearch cluster to use as a log forwarding destination. Cannot be specified if `endpoint` is also specified. - ClusterName string `json:"cluster_name,omitempty"` -} - -// AppLogDestinationSpecPapertrail Papertrail configuration. -type AppLogDestinationSpecPapertrail struct { - // Papertrail syslog endpoint. - Endpoint string `json:"endpoint"` -} - -// AppRouteSpec struct for AppRouteSpec -type AppRouteSpec struct { - // (Deprecated) An HTTP path prefix. Paths must start with / and must be unique across all components within an app. - Path string `json:"path,omitempty"` - // (Deprecated) An optional flag to preserve the path that is forwarded to the backend service. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If this value is `true`, the path will remain `/api/list`. Note: this is not applicable for Functions Components. - PreservePathPrefix bool `json:"preserve_path_prefix,omitempty"` -} - -// AppServiceSpec struct for AppServiceSpec -type AppServiceSpec struct { - // The name. Must be unique across all components within the same app. - Name string `json:"name"` - Git *GitSourceSpec `json:"git,omitempty"` - GitHub *GitHubSourceSpec `json:"github,omitempty"` - Image *ImageSourceSpec `json:"image,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - // The path to the Dockerfile relative to the root of the repo. 
If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks. - DockerfilePath string `json:"dockerfile_path,omitempty"` - // An optional build command to run while building this component from source. - BuildCommand string `json:"build_command,omitempty"` - // An optional run command to override the component's default. - RunCommand string `json:"run_command,omitempty"` - // An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo. - SourceDir string `json:"source_dir,omitempty"` - // An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/). - EnvironmentSlug string `json:"environment_slug,omitempty"` - // A list of environment variables made available to the component. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - InstanceSizeSlug string `json:"instance_size_slug,omitempty"` - // The amount of instances that this component should be scaled to. - InstanceCount int64 `json:"instance_count,omitempty"` - Autoscaling *AppAutoscalingSpec `json:"autoscaling,omitempty"` - // The internal port on which this service's run command will listen. Default: 8080 If there is not an environment variable with the name `PORT`, one will be automatically added with its value set to the value of this field. - HTTPPort int64 `json:"http_port,omitempty"` - // (Deprecated) A list of HTTP routes that should be routed to this component. - Routes []*AppRouteSpec `json:"routes,omitempty"` - HealthCheck *AppServiceSpecHealthCheck `json:"health_check,omitempty"` - CORS *AppCORSPolicy `json:"cors,omitempty"` - // The ports on which this service will listen for internal traffic. - InternalPorts []int64 `json:"internal_ports,omitempty"` - // A list of configured alerts which apply to the component. 
- Alerts []*AppAlertSpec `json:"alerts,omitempty"` - // A list of configured log forwarding destinations. - LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` - Termination *AppServiceSpecTermination `json:"termination,omitempty"` -} - -// AppServiceSpecHealthCheck struct for AppServiceSpecHealthCheck -type AppServiceSpecHealthCheck struct { - // Deprecated. Use http_path instead. - Path string `json:"path,omitempty"` - // The number of seconds to wait before beginning health checks. Default: 0 seconds, Minimum 0, Maximum 3600. - InitialDelaySeconds int32 `json:"initial_delay_seconds,omitempty"` - // The number of seconds to wait between health checks. Default: 10 seconds, Minimum 1, Maximum 300. - PeriodSeconds int32 `json:"period_seconds,omitempty"` - // The number of seconds after which the check times out. Default: 1 second, Minimum 1, Maximum 120. - TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` - // The number of successful health checks before considered healthy. Default: 1, Minimum 1, Maximum 50. - SuccessThreshold int32 `json:"success_threshold,omitempty"` - // The number of failed health checks before considered unhealthy. Default: 9, Minimum 1, Maximum 50. - FailureThreshold int32 `json:"failure_threshold,omitempty"` - // The route path used for the HTTP health check ping. If not set, the HTTP health check will be disabled and a TCP health check used instead. - HTTPPath string `json:"http_path,omitempty"` - // The port on which the health check will be performed. If not set, the health check will be performed on the component's http_port. - Port int64 `json:"port,omitempty"` -} - -// AppServiceSpecTermination struct for AppServiceSpecTermination -type AppServiceSpecTermination struct { - // The number of seconds to wait between selecting a container instance for termination and issuing the TERM signal. Selecting a container instance for termination begins an asynchronous drain of new requests on upstream load-balancers. 
Default: 15 seconds, Minimum 1, Maximum 110. - DrainSeconds int32 `json:"drain_seconds,omitempty"` - // The number of seconds to wait between sending a TERM signal to a container and issuing a KILL which causes immediate shutdown. Default: 120, Minimum 1, Maximum 600. - GracePeriodSeconds int32 `json:"grace_period_seconds,omitempty"` -} - -// AppSpec The desired configuration of an application. -type AppSpec struct { - // The name of the app. Must be unique across all apps in the same account. - Name string `json:"name"` - // Workloads which expose publicly-accessible HTTP services. - Services []*AppServiceSpec `json:"services,omitempty"` - // Content which can be rendered to static web assets. - StaticSites []*AppStaticSiteSpec `json:"static_sites,omitempty"` - // Workloads which do not expose publicly-accessible HTTP services. - Workers []*AppWorkerSpec `json:"workers,omitempty"` - // Pre and post deployment workloads which do not expose publicly-accessible HTTP routes. - Jobs []*AppJobSpec `json:"jobs,omitempty"` - // Workloads which expose publicly-accessible HTTP services via Functions Components. - Functions []*AppFunctionsSpec `json:"functions,omitempty"` - // Database instances which can provide persistence to workloads within the application. - Databases []*AppDatabaseSpec `json:"databases,omitempty"` - // A set of hostnames where the application will be available. - Domains []*AppDomainSpec `json:"domains,omitempty"` - Region string `json:"region,omitempty"` - // A list of environment variables made available to all components in the app. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - // A list of alerts which apply to the app. - Alerts []*AppAlertSpec `json:"alerts,omitempty"` - Ingress *AppIngressSpec `json:"ingress,omitempty"` - Egress *AppEgressSpec `json:"egress,omitempty"` - Features []string `json:"features,omitempty"` -} - -// AppStaticSiteSpec struct for AppStaticSiteSpec -type AppStaticSiteSpec struct { - // The name. 
Must be unique across all components within the same app. - Name string `json:"name"` - Git *GitSourceSpec `json:"git,omitempty"` - GitHub *GitHubSourceSpec `json:"github,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - // The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks. - DockerfilePath string `json:"dockerfile_path,omitempty"` - // An optional build command to run while building this component from source. - BuildCommand string `json:"build_command,omitempty"` - // An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo. - SourceDir string `json:"source_dir,omitempty"` - // An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/). - EnvironmentSlug string `json:"environment_slug,omitempty"` - // An optional path to where the built assets will be located, relative to the build context. If not set, App Platform will automatically scan for these directory names: `_static`, `dist`, `public`, `build`. - OutputDir string `json:"output_dir,omitempty"` - IndexDocument string `json:"index_document,omitempty"` - // The name of the error document to use when serving this static site. Default: 404.html. If no such file exists within the built assets, App Platform will supply one. - ErrorDocument string `json:"error_document,omitempty"` - // A list of environment variables made available to the component. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - // (Deprecated) A list of HTTP routes that should be routed to this component. 
- Routes []*AppRouteSpec `json:"routes,omitempty"` - CORS *AppCORSPolicy `json:"cors,omitempty"` - // The name of the document to use as the fallback for any requests to documents that are not found when serving this static site. Only 1 of `catchall_document` or `error_document` can be set. - CatchallDocument string `json:"catchall_document,omitempty"` -} - -// AppVariableDefinition struct for AppVariableDefinition -type AppVariableDefinition struct { - // The name - Key string `json:"key"` - // The value. If the type is `SECRET`, the value will be encrypted on first submission. On following submissions, the encrypted value should be used. - Value string `json:"value,omitempty"` - Scope AppVariableScope `json:"scope,omitempty"` - Type AppVariableType `json:"type,omitempty"` -} - -// AppWorkerSpec struct for AppWorkerSpec -type AppWorkerSpec struct { - // The name. Must be unique across all components within the same app. - Name string `json:"name"` - Git *GitSourceSpec `json:"git,omitempty"` - GitHub *GitHubSourceSpec `json:"github,omitempty"` - Image *ImageSourceSpec `json:"image,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - // The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks. - DockerfilePath string `json:"dockerfile_path,omitempty"` - // An optional build command to run while building this component from source. - BuildCommand string `json:"build_command,omitempty"` - // An optional run command to override the component's default. - RunCommand string `json:"run_command,omitempty"` - // An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo. - SourceDir string `json:"source_dir,omitempty"` - // An environment slug describing the type of this app. 
For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/). - EnvironmentSlug string `json:"environment_slug,omitempty"` - // A list of environment variables made available to the component. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - // The instance size to use for this component. - InstanceSizeSlug string `json:"instance_size_slug,omitempty"` - InstanceCount int64 `json:"instance_count,omitempty"` - Autoscaling *AppAutoscalingSpec `json:"autoscaling,omitempty"` - // A list of configured alerts which apply to the component. - Alerts []*AppAlertSpec `json:"alerts,omitempty"` - // A list of configured log forwarding destinations. - LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` - Termination *AppWorkerSpecTermination `json:"termination,omitempty"` -} - -// AppWorkerSpecTermination struct for AppWorkerSpecTermination -type AppWorkerSpecTermination struct { - // The number of seconds to wait between sending a TERM signal to a container and issuing a KILL which causes immediate shutdown. Default: 120, Minimum 1, Maximum 600. - GracePeriodSeconds int32 `json:"grace_period_seconds,omitempty"` -} - -// Buildpack struct for Buildpack -type Buildpack struct { - // The ID of the buildpack. - ID string `json:"id,omitempty"` - // Full semver version string. - Version string `json:"version,omitempty"` - // The major version line that the buildpack is pinned to. Example: a value of `1` indicates that the buildpack is pinned to versions `>=1.0.0 and <2.0.0`. - MajorVersion int32 `json:"major_version,omitempty"` - // Indicates whether the buildpack is on the latest major version line available. - Latest bool `json:"latest,omitempty"` - // A human friendly name. - Name string `json:"name,omitempty"` - // A description of the buildpack's purpose and steps performed at build time. - Description []string `json:"description,omitempty"` - // A link to the buildpack's documentation. 
- DocsLink string `json:"docs_link,omitempty"` -} - -// DeploymentCauseDetailsAutoscalerAction struct for DeploymentCauseDetailsAutoscalerAction -type DeploymentCauseDetailsAutoscalerAction struct { - // Marker for the deployment being autoscaled. Necessary because the generation tooling can't handle empty messages. - Autoscaled bool `json:"autoscaled,omitempty"` -} - -// DeploymentCauseDetailsDigitalOceanUser struct for DeploymentCauseDetailsDigitalOceanUser -type DeploymentCauseDetailsDigitalOceanUser struct { - UUID string `json:"uuid,omitempty"` - Email string `json:"email,omitempty"` - FullName string `json:"full_name,omitempty"` -} - -// DeploymentCauseDetailsDigitalOceanUserAction struct for DeploymentCauseDetailsDigitalOceanUserAction -type DeploymentCauseDetailsDigitalOceanUserAction struct { - User *DeploymentCauseDetailsDigitalOceanUser `json:"user,omitempty"` - Name DeploymentCauseDetailsDigitalOceanUserActionName `json:"name,omitempty"` -} - -// DeploymentCauseDetailsDOCRPush struct for DeploymentCauseDetailsDOCRPush -type DeploymentCauseDetailsDOCRPush struct { - // The registry name. - Registry string `json:"registry,omitempty"` - // The repository name. - Repository string `json:"repository,omitempty"` - // The repository tag. - Tag string `json:"tag,omitempty"` - // OCI Image digest. - ImageDigest string `json:"image_digest,omitempty"` -} - -// DeploymentCauseDetailsGitPush struct for DeploymentCauseDetailsGitPush -type DeploymentCauseDetailsGitPush struct { - GitHub *GitHubSourceSpec `json:"github,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - Username string `json:"username,omitempty"` - CommitAuthor string `json:"commit_author,omitempty"` - CommitSHA string `json:"commit_sha,omitempty"` - CommitMessage string `json:"commit_message,omitempty"` -} - -// AppCORSPolicy struct for AppCORSPolicy -type AppCORSPolicy struct { - // The set of allowed CORS origins. This configures the Access-Control-Allow-Origin header. 
- AllowOrigins []*AppStringMatch `json:"allow_origins,omitempty"` - // The set of allowed HTTP methods. This configures the Access-Control-Allow-Methods header. - AllowMethods []string `json:"allow_methods,omitempty"` - // The set of allowed HTTP request headers. This configures the Access-Control-Allow-Headers header. - AllowHeaders []string `json:"allow_headers,omitempty"` - // The set of HTTP response headers that browsers are allowed to access. This configures the Access-Control-Expose-Headers header. - ExposeHeaders []string `json:"expose_headers,omitempty"` - // An optional duration specifying how long browsers can cache the results of a preflight request. This configures the Access-Control-Max-Age header. Example: `5h30m`. - MaxAge string `json:"max_age,omitempty"` - // Whether browsers should expose the response to the client-side JavaScript code when the request's credentials mode is `include`. This configures the Access-Control-Allow-Credentials header. - AllowCredentials bool `json:"allow_credentials,omitempty"` -} - -// AppCreateRequest struct for AppCreateRequest -type AppCreateRequest struct { - Spec *AppSpec `json:"spec"` - // Optional. The UUID of the project the app should be assigned. 
- ProjectID string `json:"project_id,omitempty"` -} - -// DeployTemplate struct for DeployTemplate -type DeployTemplate struct { - Spec *AppSpec `json:"spec,omitempty"` -} - -// Deployment struct for Deployment -type Deployment struct { - ID string `json:"id,omitempty"` - Spec *AppSpec `json:"spec,omitempty"` - Services []*DeploymentService `json:"services,omitempty"` - StaticSites []*DeploymentStaticSite `json:"static_sites,omitempty"` - Workers []*DeploymentWorker `json:"workers,omitempty"` - Jobs []*DeploymentJob `json:"jobs,omitempty"` - Functions []*DeploymentFunctions `json:"functions,omitempty"` - PhaseLastUpdatedAt time.Time `json:"phase_last_updated_at,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - Cause string `json:"cause,omitempty"` - ClonedFrom string `json:"cloned_from,omitempty"` - Progress *DeploymentProgress `json:"progress,omitempty"` - Phase DeploymentPhase `json:"phase,omitempty"` - TierSlug string `json:"tier_slug,omitempty"` - PreviousDeploymentID string `json:"previous_deployment_id,omitempty"` - CauseDetails *DeploymentCauseDetails `json:"cause_details,omitempty"` - LoadBalancerID string `json:"load_balancer_id,omitempty"` - Timing *DeploymentTiming `json:"timing,omitempty"` -} - -// DeploymentCauseDetails struct for DeploymentCauseDetails -type DeploymentCauseDetails struct { - DigitalOceanUserAction *DeploymentCauseDetailsDigitalOceanUserAction `json:"digitalocean_user_action,omitempty"` - GitPush *DeploymentCauseDetailsGitPush `json:"git_push,omitempty"` - DOCRPush *DeploymentCauseDetailsDOCRPush `json:"docr_push,omitempty"` - Internal bool `json:"internal,omitempty"` - Autoscaler *DeploymentCauseDetailsAutoscalerAction `json:"autoscaler,omitempty"` - Type DeploymentCauseDetailsType `json:"type,omitempty"` -} - -// DeploymentCauseDetailsType - MANUAL: A deployment that was manually created - DEPLOY_ON_PUSH: A deployment that was automatically created by a Deploy on 
Push hook - MAINTENANCE: A deployment created for App Platform maintenance - MANUAL_ROLLBACK: A rollback deployment that was manually created - AUTO_ROLLBACK: An automatic rollback deployment created as a result of a previous deployment failing - UPDATE_DATABASE_TRUSTED_SOURCES: A deployment that was created due to an update in database trusted sources. - AUTOSCALED: A deployment that was created due to an autoscaler update. -type DeploymentCauseDetailsType string - -// List of DeploymentCauseDetailsType -const ( - DeploymentCauseDetailsType_Unknown DeploymentCauseDetailsType = "UNKNOWN" - DeploymentCauseDetailsType_Manual DeploymentCauseDetailsType = "MANUAL" - DeploymentCauseDetailsType_DeployOnPush DeploymentCauseDetailsType = "DEPLOY_ON_PUSH" - DeploymentCauseDetailsType_Maintenance DeploymentCauseDetailsType = "MAINTENANCE" - DeploymentCauseDetailsType_ManualRollback DeploymentCauseDetailsType = "MANUAL_ROLLBACK" - DeploymentCauseDetailsType_AutoRollback DeploymentCauseDetailsType = "AUTO_ROLLBACK" - DeploymentCauseDetailsType_UpdateDatabaseTrustedSources DeploymentCauseDetailsType = "UPDATE_DATABASE_TRUSTED_SOURCES" - DeploymentCauseDetailsType_Autoscaled DeploymentCauseDetailsType = "AUTOSCALED" -) - -// DeploymentFunctions struct for DeploymentFunctions -type DeploymentFunctions struct { - Name string `json:"name,omitempty"` - // The commit hash of the repository that was used to build this functions component. - SourceCommitHash string `json:"source_commit_hash,omitempty"` - // The namespace where the functions are deployed. - Namespace string `json:"namespace,omitempty"` -} - -// DeploymentJob struct for DeploymentJob -type DeploymentJob struct { - Name string `json:"name,omitempty"` - SourceCommitHash string `json:"source_commit_hash,omitempty"` - // The list of resolved buildpacks used for a given deployment component. 
- Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// DeploymentPhase the model 'DeploymentPhase' -type DeploymentPhase string - -// List of DeploymentPhase -const ( - DeploymentPhase_Unknown DeploymentPhase = "UNKNOWN" - DeploymentPhase_PendingBuild DeploymentPhase = "PENDING_BUILD" - DeploymentPhase_Building DeploymentPhase = "BUILDING" - DeploymentPhase_PendingDeploy DeploymentPhase = "PENDING_DEPLOY" - DeploymentPhase_Deploying DeploymentPhase = "DEPLOYING" - DeploymentPhase_Active DeploymentPhase = "ACTIVE" - DeploymentPhase_Superseded DeploymentPhase = "SUPERSEDED" - DeploymentPhase_Error DeploymentPhase = "ERROR" - DeploymentPhase_Canceled DeploymentPhase = "CANCELED" -) - -// DeploymentProgress struct for DeploymentProgress -type DeploymentProgress struct { - PendingSteps int32 `json:"pending_steps,omitempty"` - RunningSteps int32 `json:"running_steps,omitempty"` - SuccessSteps int32 `json:"success_steps,omitempty"` - ErrorSteps int32 `json:"error_steps,omitempty"` - TotalSteps int32 `json:"total_steps,omitempty"` - Steps []*DeploymentProgressStep `json:"steps,omitempty"` - SummarySteps []*DeploymentProgressStep `json:"summary_steps,omitempty"` -} - -// DeploymentProgressStep struct for DeploymentProgressStep -type DeploymentProgressStep struct { - Name string `json:"name,omitempty"` - Status DeploymentProgressStepStatus `json:"status,omitempty"` - Steps []*DeploymentProgressStep `json:"steps,omitempty"` - StartedAt time.Time `json:"started_at,omitempty"` - EndedAt time.Time `json:"ended_at,omitempty"` - Reason *DeploymentProgressStepReason `json:"reason,omitempty"` - ComponentName string `json:"component_name,omitempty"` - // The base of a human-readable description of the step intended to be combined with the component name for presentation. 
For example: `message_base` = \"Building service\" `component_name` = \"api\" - MessageBase string `json:"message_base,omitempty"` -} - -// DeploymentProgressStepReason struct for DeploymentProgressStepReason -type DeploymentProgressStepReason struct { - Code string `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -// DeploymentProgressStepStatus the model 'DeploymentProgressStepStatus' -type DeploymentProgressStepStatus string - -// List of DeploymentProgressStepStatus -const ( - DeploymentProgressStepStatus_Unknown DeploymentProgressStepStatus = "UNKNOWN" - DeploymentProgressStepStatus_Pending DeploymentProgressStepStatus = "PENDING" - DeploymentProgressStepStatus_Running DeploymentProgressStepStatus = "RUNNING" - DeploymentProgressStepStatus_Error DeploymentProgressStepStatus = "ERROR" - DeploymentProgressStepStatus_Success DeploymentProgressStepStatus = "SUCCESS" -) - -// DeploymentService struct for DeploymentService -type DeploymentService struct { - Name string `json:"name,omitempty"` - SourceCommitHash string `json:"source_commit_hash,omitempty"` - // The list of resolved buildpacks used for a given deployment component. - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// DeploymentStaticSite struct for DeploymentStaticSite -type DeploymentStaticSite struct { - Name string `json:"name,omitempty"` - SourceCommitHash string `json:"source_commit_hash,omitempty"` - // The list of resolved buildpacks used for a given deployment component. - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// DeploymentTiming struct for DeploymentTiming -type DeploymentTiming struct { - // Pending describes the time spent waiting for the build to begin. This may include delays related to build concurrency limits. - Pending string `json:"pending,omitempty"` - // BuildTotal describes total time between the start of the build and its completion. 
- BuildTotal string `json:"build_total,omitempty"` - // BuildBillable describes the time spent executing the build. As builds may run concurrently this may be greater than the build total. - BuildBillable string `json:"build_billable,omitempty"` - // Components breaks down billable build time by component. - Components []*DeploymentTimingComponent `json:"components,omitempty"` - // DatabaseProvision describes the time spent creating databases. - DatabaseProvision string `json:"database_provision,omitempty"` - // Deploying is time spent starting containers and waiting for health checks to pass. - Deploying string `json:"deploying,omitempty"` -} - -// DeploymentTimingComponent struct for DeploymentTimingComponent -type DeploymentTimingComponent struct { - // Name of the component. - Name string `json:"name,omitempty"` - // BuildBillable is the billable build time for this component. - BuildBillable string `json:"build_billable,omitempty"` -} - -// DeploymentWorker struct for DeploymentWorker -type DeploymentWorker struct { - Name string `json:"name,omitempty"` - SourceCommitHash string `json:"source_commit_hash,omitempty"` - // The list of resolved buildpacks used for a given deployment component. - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// DetectRequest struct for DetectRequest -type DetectRequest struct { - Git *GitSourceSpec `json:"git,omitempty"` - GitHub *GitHubSourceSpec `json:"github,omitempty"` - GitLab *GitLabSourceSpec `json:"gitlab,omitempty"` - // An optional commit hash to use instead of the branch specified in the source spec. - CommitSHA string `json:"commit_sha,omitempty"` - // An optional path to the working directory for the detection process. 
- SourceDir string `json:"source_dir,omitempty"` -} - -// DetectResponse struct for DetectResponse -type DetectResponse struct { - Components []*DetectResponseComponent `json:"components,omitempty"` - Template *DeployTemplate `json:"template,omitempty"` - TemplateFound bool `json:"template_found,omitempty"` - TemplateValid bool `json:"template_valid,omitempty"` - TemplateError string `json:"template_error,omitempty"` -} - -// DetectResponseComponent struct for DetectResponseComponent -type DetectResponseComponent struct { - Strategy DetectResponseType `json:"strategy,omitempty"` - Types []string `json:"types,omitempty"` - // A list of Dockerfiles that were found for this component. The recommendation is to use the first Dockerfile. - Dockerfiles []string `json:"dockerfiles,omitempty"` - BuildCommand string `json:"build_command,omitempty"` - RunCommand string `json:"run_command,omitempty"` - EnvironmentSlug string `json:"environment_slug,omitempty"` - // A list of HTTP ports that this component may listen on. The recommendation is to use the last port in the list. - HTTPPorts []int64 `json:"http_ports,omitempty"` - EnvVars []*AppVariableDefinition `json:"env_vars,omitempty"` - // List of serverless packages detected. - ServerlessPackages []*DetectResponseServerlessPackage `json:"serverless_packages,omitempty"` - SourceDir string `json:"source_dir,omitempty"` - // The list of detected buildpacks that will be used for the component build. - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// DetectResponseServerlessFunction struct for DetectResponseServerlessFunction -type DetectResponseServerlessFunction struct { - // Name of the function. - Name string `json:"name,omitempty"` - // Package that the function belongs to. - Package string `json:"package,omitempty"` - // Runtime detected for the function. 
- Runtime string `json:"runtime,omitempty"` - Limits *DetectResponseServerlessFunctionLimits `json:"limits,omitempty"` -} - -// DetectResponseServerlessFunctionLimits struct for DetectResponseServerlessFunctionLimits -type DetectResponseServerlessFunctionLimits struct { - // Timeout for function invocation in milliseconds. - Timeout string `json:"timeout,omitempty"` - // Max memory allocation for function invocation in megabytes. - Memory string `json:"memory,omitempty"` - // Max log size usage for function invocation in kilobytes. - Logs string `json:"logs,omitempty"` -} - -// DetectResponseServerlessPackage struct for DetectResponseServerlessPackage -type DetectResponseServerlessPackage struct { - // Name of the serverless package. - Name string `json:"name,omitempty"` - // List of functions detected in the serverless package. - Functions []*DetectResponseServerlessFunction `json:"functions,omitempty"` -} - -// DetectResponseType the model 'DetectResponseType' -type DetectResponseType string - -// List of DetectResponseType -const ( - DetectResponseType_Unspecified DetectResponseType = "UNSPECIFIED" - DetectResponseType_Dockerfile DetectResponseType = "DOCKERFILE" - DetectResponseType_Buildpack DetectResponseType = "BUILDPACK" - DetectResponseType_HTML DetectResponseType = "HTML" - DetectResponseType_Serverless DetectResponseType = "SERVERLESS" -) - -// DeploymentCauseDetailsDigitalOceanUserActionName the model 'CauseDetailsDigitalOceanUserActionName' -type DeploymentCauseDetailsDigitalOceanUserActionName string - -// List of DeploymentCauseDetailsDigitalOceanUserActionName -const ( - DeploymentCauseDetailsDigitalOceanUserActionName_Unknown DeploymentCauseDetailsDigitalOceanUserActionName = "UNKNOWN" - DeploymentCauseDetailsDigitalOceanUserActionName_CreateDeployment DeploymentCauseDetailsDigitalOceanUserActionName = "CREATE_DEPLOYMENT" - DeploymentCauseDetailsDigitalOceanUserActionName_UpdateSpec DeploymentCauseDetailsDigitalOceanUserActionName = "UPDATE_SPEC" - 
DeploymentCauseDetailsDigitalOceanUserActionName_ResetDatabasePassword DeploymentCauseDetailsDigitalOceanUserActionName = "RESET_DATABASE_PASSWORD" - DeploymentCauseDetailsDigitalOceanUserActionName_RollbackApp DeploymentCauseDetailsDigitalOceanUserActionName = "ROLLBACK_APP" - DeploymentCauseDetailsDigitalOceanUserActionName_RevertAppRollback DeploymentCauseDetailsDigitalOceanUserActionName = "REVERT_APP_ROLLBACK" - DeploymentCauseDetailsDigitalOceanUserActionName_UpgradeBuildpack DeploymentCauseDetailsDigitalOceanUserActionName = "UPGRADE_BUILDPACK" -) - -// AppDomain struct for AppDomain -type AppDomain struct { - ID string `json:"id,omitempty"` - Spec *AppDomainSpec `json:"spec,omitempty"` - Phase AppDomainPhase `json:"phase,omitempty"` - Progress *AppDomainProgress `json:"progress,omitempty"` - Validation *AppDomainValidation `json:"validation,omitempty"` - Validations []*AppDomainValidation `json:"validations,omitempty"` - RotateValidationRecords bool `json:"rotate_validation_records,omitempty"` - CertificateExpiresAt time.Time `json:"certificate_expires_at,omitempty"` -} - -// AppDomainPhase the model 'AppDomainPhase' -type AppDomainPhase string - -// List of AppDomainPhase -const ( - AppJobSpecKindPHASE_Unknown AppDomainPhase = "UNKNOWN" - AppJobSpecKindPHASE_Pending AppDomainPhase = "PENDING" - AppJobSpecKindPHASE_Configuring AppDomainPhase = "CONFIGURING" - AppJobSpecKindPHASE_Active AppDomainPhase = "ACTIVE" - AppJobSpecKindPHASE_Error AppDomainPhase = "ERROR" -) - -// AppDomainProgress struct for AppDomainProgress -type AppDomainProgress struct { - Steps []*AppDomainProgressStep `json:"steps,omitempty"` -} - -// AppDomainProgressStep struct for AppDomainProgressStep -type AppDomainProgressStep struct { - Name string `json:"name,omitempty"` - Status AppDomainProgressStepStatus `json:"status,omitempty"` - Steps []*AppDomainProgressStep `json:"steps,omitempty"` - StartedAt time.Time `json:"started_at,omitempty"` - EndedAt time.Time 
`json:"ended_at,omitempty"` - Reason *AppDomainProgressStepReason `json:"reason,omitempty"` -} - -// AppDomainProgressStepReason struct for AppDomainProgressStepReason -type AppDomainProgressStepReason struct { - Code string `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -// AppDomainProgressStepStatus the model 'AppDomainProgressStepStatus' -type AppDomainProgressStepStatus string - -// List of AppDomainProgressStepStatus -const ( - AppJobSpecKindProgressStepStatus_Unknown AppDomainProgressStepStatus = "UNKNOWN" - AppJobSpecKindProgressStepStatus_Pending AppDomainProgressStepStatus = "PENDING" - AppJobSpecKindProgressStepStatus_Running AppDomainProgressStepStatus = "RUNNING" - AppJobSpecKindProgressStepStatus_Error AppDomainProgressStepStatus = "ERROR" - AppJobSpecKindProgressStepStatus_Success AppDomainProgressStepStatus = "SUCCESS" -) - -// AppDomainValidation struct for AppDomainValidation -type AppDomainValidation struct { - TXTName string `json:"txt_name,omitempty"` - TXTValue string `json:"txt_value,omitempty"` -} - -// GetAppDatabaseConnectionDetailsResponse struct for GetAppDatabaseConnectionDetailsResponse -type GetAppDatabaseConnectionDetailsResponse struct { - ConnectionDetails []*GetDatabaseConnectionDetailsResponse `json:"connection_details,omitempty"` -} - -// GetDatabaseConnectionDetailsResponse struct for GetDatabaseConnectionDetailsResponse -type GetDatabaseConnectionDetailsResponse struct { - Host string `json:"host,omitempty"` - Port int64 `json:"port,omitempty"` - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - DatabaseName string `json:"database_name,omitempty"` - SslMode string `json:"ssl_mode,omitempty"` - DatabaseURL string `json:"database_url,omitempty"` - ComponentName string `json:"component_name,omitempty"` - Pools []*GetDatabaseConnectionDetailsResponsePool `json:"pools,omitempty"` -} - -// GetDatabaseConnectionDetailsResponsePool struct for 
GetDatabaseConnectionDetailsResponsePool -type GetDatabaseConnectionDetailsResponsePool struct { - PoolName string `json:"pool_name,omitempty"` - Host string `json:"host,omitempty"` - Port int64 `json:"port,omitempty"` - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - DatabaseName string `json:"database_name,omitempty"` - SslMode string `json:"ssl_mode,omitempty"` - DatabaseURL string `json:"database_url,omitempty"` -} - -// GetDatabaseTrustedSourceResponse struct for GetDatabaseTrustedSourceResponse -type GetDatabaseTrustedSourceResponse struct { - IsEnabled bool `json:"is_enabled,omitempty"` -} - -// GitHubSourceSpec struct for GitHubSourceSpec -type GitHubSourceSpec struct { - Repo string `json:"repo,omitempty"` - Branch string `json:"branch,omitempty"` - DeployOnPush bool `json:"deploy_on_push,omitempty"` -} - -// GitLabSourceSpec struct for GitLabSourceSpec -type GitLabSourceSpec struct { - Repo string `json:"repo,omitempty"` - Branch string `json:"branch,omitempty"` - DeployOnPush bool `json:"deploy_on_push,omitempty"` -} - -// GitSourceSpec struct for GitSourceSpec -type GitSourceSpec struct { - RepoCloneURL string `json:"repo_clone_url,omitempty"` - Branch string `json:"branch,omitempty"` -} - -// ImageSourceSpec struct for ImageSourceSpec -type ImageSourceSpec struct { - RegistryType ImageSourceSpecRegistryType `json:"registry_type,omitempty"` - // The registry name. Must be left empty for the `DOCR` registry type. Required for the `DOCKER_HUB` registry type. - Registry string `json:"registry,omitempty"` - // The repository name. - Repository string `json:"repository,omitempty"` - // The repository tag. Defaults to `latest` if not provided and no digest is provided. Cannot be specified if digest is provided. - Tag string `json:"tag,omitempty"` - // The image digest. Cannot be specified if tag is provided. - Digest string `json:"digest,omitempty"` - // The credentials to be able to pull the image. 
The value will be encrypted on first submission. On following submissions, the encrypted value should be used. - \"$username:$access_token\" for registries of type `DOCKER_HUB`. - \"$username:$access_token\" for registries of type `GHCR`. - RegistryCredentials string `json:"registry_credentials,omitempty"` - DeployOnPush *ImageSourceSpecDeployOnPush `json:"deploy_on_push,omitempty"` -} - -// ImageSourceSpecDeployOnPush struct for ImageSourceSpecDeployOnPush -type ImageSourceSpecDeployOnPush struct { - // Automatically deploy new images. Only for DOCR images. Can't be enabled when a specific digest is specified. - Enabled bool `json:"enabled,omitempty"` -} - -// ImageSourceSpecRegistryType - DOCR: The DigitalOcean container registry type. - DOCKER_HUB: The DockerHub container registry type. - GHCR: The GitHub container registry type. -type ImageSourceSpecRegistryType string - -// List of ImageSourceSpecRegistryType -const ( - ImageSourceSpecRegistryType_Unspecified ImageSourceSpecRegistryType = "UNSPECIFIED" - ImageSourceSpecRegistryType_DOCR ImageSourceSpecRegistryType = "DOCR" - ImageSourceSpecRegistryType_DockerHub ImageSourceSpecRegistryType = "DOCKER_HUB" - ImageSourceSpecRegistryType_Ghcr ImageSourceSpecRegistryType = "GHCR" -) - -// AppInstanceSize struct for AppInstanceSize -type AppInstanceSize struct { - Name string `json:"name,omitempty"` - Slug string `json:"slug,omitempty"` - CPUType AppInstanceSizeCPUType `json:"cpu_type,omitempty"` - CPUs string `json:"cpus,omitempty"` - MemoryBytes string `json:"memory_bytes,omitempty"` - USDPerMonth string `json:"usd_per_month,omitempty"` - USDPerSecond string `json:"usd_per_second,omitempty"` - TierSlug string `json:"tier_slug,omitempty"` - // (Deprecated) The slug of the corresponding upgradable instance size on the higher tier. - TierUpgradeTo string `json:"tier_upgrade_to,omitempty"` - // (Deprecated) The slug of the corresponding downgradable instance size on the lower tier. 
- TierDowngradeTo string `json:"tier_downgrade_to,omitempty"` - // Indicates if the tier instance size can enable autoscaling. - Scalable bool `json:"scalable,omitempty"` - // (Deprecated) Indicates if the tier instance size is in feature preview state. - FeaturePreview bool `json:"feature_preview,omitempty"` - // Indicates if the tier instance size allows more than one instance. - SingleInstanceOnly bool `json:"single_instance_only,omitempty"` - // Indicates if the tier instance size is intended for deprecation. - DeprecationIntent bool `json:"deprecation_intent,omitempty"` - // The bandwidth allowance in GiB for the tier instance size. - BandwidthAllowanceGib string `json:"bandwidth_allowance_gib,omitempty"` -} - -// AppInstanceSizeCPUType the model 'AppInstanceSizeCPUType' -type AppInstanceSizeCPUType string - -// List of AppInstanceSizeCPUType -const ( - AppInstanceSizeCPUType_Unspecified AppInstanceSizeCPUType = "UNSPECIFIED" - AppInstanceSizeCPUType_Shared AppInstanceSizeCPUType = "SHARED" - AppInstanceSizeCPUType_Dedicated AppInstanceSizeCPUType = "DEDICATED" -) - -// ListBuildpacksResponse struct for ListBuildpacksResponse -type ListBuildpacksResponse struct { - // List of the available buildpacks on App Platform. - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// OpenSearchBasicAuth Configure Username and/or Password for Basic authentication. -type OpenSearchBasicAuth struct { - // Username to authenticate with. Only required when `endpoint` is set. Defaults to `doadmin` when `cluster_name` is set. - User string `json:"user,omitempty"` - // Password for user defined in User. Is required when `endpoint` is set. Cannot be set if using a DigitalOcean DBaaS OpenSearch cluster. - Password string `json:"password,omitempty"` -} - -// AppProposeRequest struct for AppProposeRequest -type AppProposeRequest struct { - Spec *AppSpec `json:"spec"` - // An optional ID of an existing app. 
If set, the spec will be treated as a proposed update to the specified app. The existing app is not modified using this method. - AppID string `json:"app_id,omitempty"` -} - -// AppProposeResponse struct for AppProposeResponse -type AppProposeResponse struct { - // Deprecated. Please use app_is_starter instead. - AppIsStatic bool `json:"app_is_static,omitempty"` - // Indicates whether the app name is available. - AppNameAvailable bool `json:"app_name_available,omitempty"` - // If the app name is unavailable, this will be set to a suggested available name. - AppNameSuggestion string `json:"app_name_suggestion,omitempty"` - // Deprecated. Please use existing_starter_apps instead. - ExistingStaticApps string `json:"existing_static_apps,omitempty"` - // Deprecated. Please use max_free_starter_apps instead. - MaxFreeStaticApps string `json:"max_free_static_apps,omitempty"` - Spec *AppSpec `json:"spec,omitempty"` - // The monthly cost of the proposed app in USD. - AppCost float32 `json:"app_cost,omitempty"` - // (Deprecated) The monthly cost of the proposed app in USD using the next pricing plan tier. For example, if you propose an app that uses the Basic tier, the `app_tier_upgrade_cost` field displays the monthly cost of the app if it were to use the Professional tier. If the proposed app already uses the most expensive tier, the field is empty. - AppTierUpgradeCost float32 `json:"app_tier_upgrade_cost,omitempty"` - // (Deprecated) The monthly cost of the proposed app in USD using the previous pricing plan tier. For example, if you propose an app that uses the Professional tier, the `app_tier_downgrade_cost` field displays the monthly cost of the app if it were to use the Basic tier. If the proposed app already uses the lest expensive tier, the field is empty. - AppTierDowngradeCost float32 `json:"app_tier_downgrade_cost,omitempty"` - // The number of existing starter tier apps the account has. 
- ExistingStarterApps string `json:"existing_starter_apps,omitempty"` - // The maximum number of free starter apps the account can have. Any additional starter apps will be charged for. These include apps with only static sites, functions, and databases. - MaxFreeStarterApps string `json:"max_free_starter_apps,omitempty"` - // Indicates whether the app is a starter tier app. - AppIsStarter bool `json:"app_is_starter,omitempty"` -} - -// AppRegion struct for AppRegion -type AppRegion struct { - Slug string `json:"slug,omitempty"` - Label string `json:"label,omitempty"` - Flag string `json:"flag,omitempty"` - Continent string `json:"continent,omitempty"` - Disabled bool `json:"disabled,omitempty"` - DataCenters []string `json:"data_centers,omitempty"` - Reason string `json:"reason,omitempty"` - // Whether or not the region is presented as the default. - Default bool `json:"default,omitempty"` -} - -// ResetDatabasePasswordRequest struct for ResetDatabasePasswordRequest -type ResetDatabasePasswordRequest struct { - AppID string `json:"app_id,omitempty"` - ComponentName string `json:"component_name,omitempty"` -} - -// ResetDatabasePasswordResponse struct for ResetDatabasePasswordResponse -type ResetDatabasePasswordResponse struct { - Deployment *Deployment `json:"deployment,omitempty"` -} - -// AppStringMatch struct for AppStringMatch -type AppStringMatch struct { - // Exact string match. Only 1 of `exact`, `prefix`, or `regex` must be set. - Exact string `json:"exact,omitempty"` - // Prefix-based match. Only 1 of `exact`, `prefix`, or `regex` must be set. 
- Prefix string `json:"prefix,omitempty"` - Regex string `json:"regex,omitempty"` -} - -// AppTier struct for AppTier -type AppTier struct { - Name string `json:"name,omitempty"` - Slug string `json:"slug,omitempty"` - EgressBandwidthBytes string `json:"egress_bandwidth_bytes,omitempty"` - BuildSeconds string `json:"build_seconds,omitempty"` -} - -// ToggleDatabaseTrustedSourceRequest struct for ToggleDatabaseTrustedSourceRequest -type ToggleDatabaseTrustedSourceRequest struct { - AppID string `json:"app_id,omitempty"` - ComponentName string `json:"component_name,omitempty"` - Enable bool `json:"enable,omitempty"` -} - -// ToggleDatabaseTrustedSourceResponse struct for ToggleDatabaseTrustedSourceResponse -type ToggleDatabaseTrustedSourceResponse struct { - IsEnabled bool `json:"is_enabled,omitempty"` -} - -// UpgradeBuildpackResponse struct for UpgradeBuildpackResponse -type UpgradeBuildpackResponse struct { - // The components that were affected by the upgrade. - AffectedComponents []string `json:"affected_components,omitempty"` - Deployment *Deployment `json:"deployment,omitempty"` -} - -// AppVariableScope the model 'AppVariableScope' -type AppVariableScope string - -// List of AppVariableScope -const ( - AppVariableScope_Unset AppVariableScope = "UNSET" - AppVariableScope_RunTime AppVariableScope = "RUN_TIME" - AppVariableScope_BuildTime AppVariableScope = "BUILD_TIME" - AppVariableScope_RunAndBuildTime AppVariableScope = "RUN_AND_BUILD_TIME" -) - -// AppVariableType the model 'AppVariableType' -type AppVariableType string - -// List of AppVariableType -const ( - AppVariableType_General AppVariableType = "GENERAL" - AppVariableType_Secret AppVariableType = "SECRET" -) diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go deleted file mode 100644 index cd72f74..0000000 --- a/vendor/github.com/digitalocean/godo/apps.go +++ /dev/null @@ -1,800 +0,0 @@ -package godo - -import ( - "context" - "errors" - "fmt" - 
"net/http" -) - -const ( - appsBasePath = "/v2/apps" -) - -// AppLogType is the type of app logs. -type AppLogType string - -const ( - // AppLogTypeBuild represents build logs. - AppLogTypeBuild AppLogType = "BUILD" - // AppLogTypeDeploy represents deploy logs. - AppLogTypeDeploy AppLogType = "DEPLOY" - // AppLogTypeRun represents run logs. - AppLogTypeRun AppLogType = "RUN" - // AppLogTypeRunRestarted represents logs of crashed/restarted instances during runtime. - AppLogTypeRunRestarted AppLogType = "RUN_RESTARTED" -) - -// AppsService is an interface for interfacing with the App Platform endpoints -// of the DigitalOcean API. -type AppsService interface { - Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error) - Get(ctx context.Context, appID string) (*App, *Response, error) - List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error) - Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error) - Delete(ctx context.Context, appID string) (*Response, error) - Propose(ctx context.Context, propose *AppProposeRequest) (*AppProposeResponse, *Response, error) - - GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) - ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) - CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error) - - GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error) - - ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) - - ListTiers(ctx context.Context) ([]*AppTier, *Response, error) - GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) - - ListInstanceSizes(ctx context.Context) ([]*AppInstanceSize, *Response, error) - GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, 
error) - - ListAlerts(ctx context.Context, appID string) ([]*AppAlert, *Response, error) - UpdateAlertDestinations(ctx context.Context, appID, alertID string, update *AlertDestinationUpdateRequest) (*AppAlert, *Response, error) - - Detect(ctx context.Context, detect *DetectRequest) (*DetectResponse, *Response, error) - - ListBuildpacks(ctx context.Context) ([]*Buildpack, *Response, error) - UpgradeBuildpack(ctx context.Context, appID string, opts UpgradeBuildpackOptions) (*UpgradeBuildpackResponse, *Response, error) - - GetAppDatabaseConnectionDetails(ctx context.Context, appID string) ([]*GetDatabaseConnectionDetailsResponse, *Response, error) - ResetDatabasePassword(ctx context.Context, appID string, component string) (*Deployment, *Response, error) - ToggleDatabaseTrustedSource( - ctx context.Context, - appID string, - component string, - opts ToggleDatabaseTrustedSourceOptions, - ) ( - *ToggleDatabaseTrustedSourceResponse, - *Response, - error, - ) -} - -// AppLogs represent app logs. -type AppLogs struct { - LiveURL string `json:"live_url"` - HistoricURLs []string `json:"historic_urls"` -} - -// AppUpdateRequest represents a request to update an app. -type AppUpdateRequest struct { - Spec *AppSpec `json:"spec"` -} - -// DeploymentCreateRequest represents a request to create a deployment. -type DeploymentCreateRequest struct { - ForceBuild bool `json:"force_build"` -} - -// AlertDestinationUpdateRequest represents a request to update alert destinations. -type AlertDestinationUpdateRequest struct { - Emails []string `json:"emails"` - SlackWebhooks []*AppAlertSlackWebhook `json:"slack_webhooks"` -} - -// UpgradeBuildpackOptions struct for UpgradeBuildpackOptions -type UpgradeBuildpackOptions struct { - // The ID of the buildpack to upgrade. - BuildpackID string `json:"buildpack_id,omitempty"` - // The Major Version to upgrade the buildpack to. If omitted, the latest available major version will be used. 
- MajorVersion int32 `json:"major_version,omitempty"` - // Whether or not to trigger a deployment for the app after upgrading the buildpack. - TriggerDeployment bool `json:"trigger_deployment,omitempty"` -} - -// ToggleDatabaseTrustedSourceOptions provides optional parameters for ToggleDatabaseTrustedSource. -type ToggleDatabaseTrustedSourceOptions struct { - // Enable, if true, indicates the database should enable the trusted sources firewall. - Enable bool -} - -type appRoot struct { - App *App `json:"app"` -} - -type appsRoot struct { - Apps []*App `json:"apps"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type deploymentRoot struct { - Deployment *Deployment `json:"deployment"` -} - -type deploymentsRoot struct { - Deployments []*Deployment `json:"deployments"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type appTierRoot struct { - Tier *AppTier `json:"tier"` -} - -type appTiersRoot struct { - Tiers []*AppTier `json:"tiers"` -} - -type instanceSizeRoot struct { - InstanceSize *AppInstanceSize `json:"instance_size"` -} - -type instanceSizesRoot struct { - InstanceSizes []*AppInstanceSize `json:"instance_sizes"` -} - -type appRegionsRoot struct { - Regions []*AppRegion `json:"regions"` -} - -type appAlertsRoot struct { - Alerts []*AppAlert `json:"alerts"` -} - -type appAlertRoot struct { - Alert *AppAlert `json:"alert"` -} - -type buildpacksRoot struct { - Buildpacks []*Buildpack `json:"buildpacks,omitempty"` -} - -// AppsServiceOp handles communication with Apps methods of the DigitalOcean API. -type AppsServiceOp struct { - client *Client -} - -// URN returns a URN identifier for the app -func (a App) URN() string { - return ToURN("app", a.ID) -} - -// Create an app. 
-func (s *AppsServiceOp) Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error) { - path := appsBasePath - req, err := s.client.NewRequest(ctx, http.MethodPost, path, create) - if err != nil { - return nil, nil, err - } - - root := new(appRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.App, resp, nil -} - -// Get an app. -func (s *AppsServiceOp) Get(ctx context.Context, appID string) (*App, *Response, error) { - path := fmt.Sprintf("%s/%s", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(appRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.App, resp, nil -} - -// List apps. -func (s *AppsServiceOp) List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error) { - path := appsBasePath - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(appsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Apps, resp, nil -} - -// Update an app. -func (s *AppsServiceOp) Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error) { - path := fmt.Sprintf("%s/%s", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodPut, path, update) - if err != nil { - return nil, nil, err - } - - root := new(appRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.App, resp, nil -} - -// Delete an app. 
-func (s *AppsServiceOp) Delete(ctx context.Context, appID string) (*Response, error) { - path := fmt.Sprintf("%s/%s", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// Propose an app. -func (s *AppsServiceOp) Propose(ctx context.Context, propose *AppProposeRequest) (*AppProposeResponse, *Response, error) { - path := fmt.Sprintf("%s/propose", appsBasePath) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, propose) - if err != nil { - return nil, nil, err - } - - res := &AppProposeResponse{} - resp, err := s.client.Do(ctx, req, res) - if err != nil { - return nil, resp, err - } - return res, resp, nil -} - -// GetDeployment gets an app deployment. -func (s *AppsServiceOp) GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) { - path := fmt.Sprintf("%s/%s/deployments/%s", appsBasePath, appID, deploymentID) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(deploymentRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Deployment, resp, nil -} - -// ListDeployments lists an app deployments. 
-func (s *AppsServiceOp) ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) { - path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(deploymentsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Deployments, resp, nil -} - -// CreateDeployment creates an app deployment. -func (s *AppsServiceOp) CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error) { - path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID) - - var createReq *DeploymentCreateRequest - for _, c := range create { - createReq = c - } - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, createReq) - if err != nil { - return nil, nil, err - } - root := new(deploymentRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Deployment, resp, nil -} - -// GetLogs retrieves app logs. 
-func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error) { - var url string - if deploymentID == "" { - url = fmt.Sprintf("%s/%s/logs?type=%s&follow=%t&tail_lines=%d", appsBasePath, appID, logType, follow, tailLines) - } else { - url = fmt.Sprintf("%s/%s/deployments/%s/logs?type=%s&follow=%t&tail_lines=%d", appsBasePath, appID, deploymentID, logType, follow, tailLines) - } - if component != "" { - url = fmt.Sprintf("%s&component_name=%s", url, component) - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) - if err != nil { - return nil, nil, err - } - logs := new(AppLogs) - resp, err := s.client.Do(ctx, req, logs) - if err != nil { - return nil, resp, err - } - return logs, resp, nil -} - -// ListRegions lists all regions supported by App Platform. -func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) { - path := fmt.Sprintf("%s/regions", appsBasePath) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(appRegionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Regions, resp, nil -} - -// ListTiers lists available app tiers. -func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, error) { - path := fmt.Sprintf("%s/tiers", appsBasePath) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(appTiersRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Tiers, resp, nil -} - -// GetTier retrieves information about a specific app tier. 
-func (s *AppsServiceOp) GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) { - path := fmt.Sprintf("%s/tiers/%s", appsBasePath, slug) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(appTierRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Tier, resp, nil -} - -// ListInstanceSizes lists available instance sizes for service, worker, and job components. -func (s *AppsServiceOp) ListInstanceSizes(ctx context.Context) ([]*AppInstanceSize, *Response, error) { - path := fmt.Sprintf("%s/tiers/instance_sizes", appsBasePath) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(instanceSizesRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.InstanceSizes, resp, nil -} - -// GetInstanceSize retrieves information about a specific instance size for service, worker, and job components. 
-func (s *AppsServiceOp) GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, error) { - path := fmt.Sprintf("%s/tiers/instance_sizes/%s", appsBasePath, slug) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(instanceSizeRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.InstanceSize, resp, nil -} - -// ListAlerts retrieves a list of alerts on an app -func (s *AppsServiceOp) ListAlerts(ctx context.Context, appID string) ([]*AppAlert, *Response, error) { - path := fmt.Sprintf("%s/%s/alerts", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(appAlertsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Alerts, resp, nil -} - -// UpdateAlertDestinations updates the alert destinations of an app's alert -func (s *AppsServiceOp) UpdateAlertDestinations(ctx context.Context, appID, alertID string, update *AlertDestinationUpdateRequest) (*AppAlert, *Response, error) { - path := fmt.Sprintf("%s/%s/alerts/%s/destinations", appsBasePath, appID, alertID) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, update) - if err != nil { - return nil, nil, err - } - root := new(appAlertRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Alert, resp, nil -} - -// Detect an app. 
-func (s *AppsServiceOp) Detect(ctx context.Context, detect *DetectRequest) (*DetectResponse, *Response, error) { - path := fmt.Sprintf("%s/detect", appsBasePath) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, detect) - if err != nil { - return nil, nil, err - } - - res := &DetectResponse{} - resp, err := s.client.Do(ctx, req, res) - if err != nil { - return nil, resp, err - } - return res, resp, nil -} - -// ListBuildpacks lists the available buildpacks on App Platform. -func (s *AppsServiceOp) ListBuildpacks(ctx context.Context) ([]*Buildpack, *Response, error) { - path := fmt.Sprintf("%s/buildpacks", appsBasePath) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(buildpacksRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Buildpacks, resp, nil -} - -// UpgradeBuildpack upgrades a buildpack for an app. -func (s *AppsServiceOp) UpgradeBuildpack(ctx context.Context, appID string, opts UpgradeBuildpackOptions) (*UpgradeBuildpackResponse, *Response, error) { - path := fmt.Sprintf("%s/%s/upgrade_buildpack", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts) - if err != nil { - return nil, nil, err - } - root := new(UpgradeBuildpackResponse) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root, resp, nil -} - -// GetAppDatabaseConnectionDetails retrieves credentials for databases associated with the app. 
-func (s *AppsServiceOp) GetAppDatabaseConnectionDetails(ctx context.Context, appID string) ([]*GetDatabaseConnectionDetailsResponse, *Response, error) { - path := fmt.Sprintf("%s/%s/database_connection_details", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(GetAppDatabaseConnectionDetailsResponse) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.ConnectionDetails, resp, nil -} - -// ResetDatabasePassword resets credentials for a database component associated with the app. -func (s *AppsServiceOp) ResetDatabasePassword(ctx context.Context, appID string, component string) (*Deployment, *Response, error) { - path := fmt.Sprintf("%s/%s/components/%s/reset_password", appsBasePath, appID, component) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil) - if err != nil { - return nil, nil, err - } - root := new(deploymentRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Deployment, resp, nil -} - -// ToggleDatabaseTrustedSource enables/disables trusted sources on the specified dev database component. -func (s *AppsServiceOp) ToggleDatabaseTrustedSource( - ctx context.Context, - appID string, - component string, - opts ToggleDatabaseTrustedSourceOptions, -) ( - *ToggleDatabaseTrustedSourceResponse, - *Response, - error, -) { - path := fmt.Sprintf("%s/%s/components/%s/trusted_sources", appsBasePath, appID, component) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts) - if err != nil { - return nil, nil, err - } - root := new(ToggleDatabaseTrustedSourceResponse) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root, resp, nil -} - -// AppComponentType is an app component type. -type AppComponentType string - -const ( - // AppComponentTypeService is the type for a service component. 
- AppComponentTypeService AppComponentType = "service" - // AppComponentTypeWorker is the type for a worker component. - AppComponentTypeWorker AppComponentType = "worker" - // AppComponentTypeJob is the type for a job component. - AppComponentTypeJob AppComponentType = "job" - // AppComponentTypeStaticSite is the type for a static site component. - AppComponentTypeStaticSite AppComponentType = "static_site" - // AppComponentTypeDatabase is the type for a database component. - AppComponentTypeDatabase AppComponentType = "database" - // AppComponentTypeFunctions is the type for a functions component. - AppComponentTypeFunctions AppComponentType = "functions" -) - -// GetType returns the Service component type. -func (s *AppServiceSpec) GetType() AppComponentType { - return AppComponentTypeService -} - -// GetType returns the Worker component type. -func (s *AppWorkerSpec) GetType() AppComponentType { - return AppComponentTypeWorker -} - -// GetType returns the Job component type. -func (s *AppJobSpec) GetType() AppComponentType { - return AppComponentTypeJob -} - -// GetType returns the StaticSite component type. -func (s *AppStaticSiteSpec) GetType() AppComponentType { - return AppComponentTypeStaticSite -} - -// GetType returns the Database component type. -func (s *AppDatabaseSpec) GetType() AppComponentType { - return AppComponentTypeDatabase -} - -// GetType returns the Functions component type. -func (s *AppFunctionsSpec) GetType() AppComponentType { - return AppComponentTypeFunctions -} - -// AppComponentSpec represents a component's spec. -type AppComponentSpec interface { - GetName() string - GetType() AppComponentType -} - -// AppBuildableComponentSpec is a component that is buildable from source. 
-type AppBuildableComponentSpec interface { - AppComponentSpec - - GetGit() *GitSourceSpec - GetGitHub() *GitHubSourceSpec - GetGitLab() *GitLabSourceSpec - - GetSourceDir() string - - GetEnvs() []*AppVariableDefinition -} - -// AppDockerBuildableComponentSpec is a component that is buildable from source using Docker. -type AppDockerBuildableComponentSpec interface { - AppBuildableComponentSpec - - GetDockerfilePath() string -} - -// AppCNBBuildableComponentSpec is a component that is buildable from source using cloud native buildpacks. -type AppCNBBuildableComponentSpec interface { - AppBuildableComponentSpec - - GetBuildCommand() string -} - -// AppContainerComponentSpec is a component that runs in a cluster. -type AppContainerComponentSpec interface { - AppBuildableComponentSpec - - GetImage() *ImageSourceSpec - GetRunCommand() string - GetInstanceSizeSlug() string - GetInstanceCount() int64 -} - -// AppRoutableComponentSpec is a component that defines routes. -type AppRoutableComponentSpec interface { - AppComponentSpec - - GetRoutes() []*AppRouteSpec - GetCORS() *AppCORSPolicy -} - -// AppSourceType is an app source type. -type AppSourceType string - -const ( - AppSourceTypeGitHub AppSourceType = "github" - AppSourceTypeGitLab AppSourceType = "gitlab" - AppSourceTypeGit AppSourceType = "git" - AppSourceTypeImage AppSourceType = "image" -) - -// SourceSpec represents a source. -type SourceSpec interface { - GetType() AppSourceType -} - -// GetType returns the GitHub source type. -func (s *GitHubSourceSpec) GetType() AppSourceType { - return AppSourceTypeGitHub -} - -// GetType returns the GitLab source type. -func (s *GitLabSourceSpec) GetType() AppSourceType { - return AppSourceTypeGitLab -} - -// GetType returns the Git source type. -func (s *GitSourceSpec) GetType() AppSourceType { - return AppSourceTypeGit -} - -// GetType returns the Image source type. 
-func (s *ImageSourceSpec) GetType() AppSourceType { - return AppSourceTypeImage -} - -// VCSSourceSpec represents a VCS source. -type VCSSourceSpec interface { - SourceSpec - GetRepo() string - GetBranch() string -} - -// GetRepo allows GitSourceSpec to implement the SourceSpec interface. -func (s *GitSourceSpec) GetRepo() string { - return s.RepoCloneURL -} - -// ForEachAppComponentSpec iterates over each component spec in an app. -func (s *AppSpec) ForEachAppComponentSpec(fn func(component AppComponentSpec) error) error { - if s == nil { - return nil - } - for _, c := range s.Services { - if err := fn(c); err != nil { - return err - } - } - for _, c := range s.Workers { - if err := fn(c); err != nil { - return err - } - } - for _, c := range s.Jobs { - if err := fn(c); err != nil { - return err - } - } - for _, c := range s.StaticSites { - if err := fn(c); err != nil { - return err - } - } - for _, c := range s.Databases { - if err := fn(c); err != nil { - return err - } - } - for _, c := range s.Functions { - if err := fn(c); err != nil { - return err - } - } - return nil -} - -// ForEachAppSpecComponent loops over each component spec that matches the provided interface type. -// The type constraint is intentionally set to `any` to allow use of arbitrary interfaces to match the desired component types. -// -// Examples: -// - interface constraint -// godo.ForEachAppSpecComponent(spec, func(component godo.AppBuildableComponentSpec) error { ... }) -// - struct type constraint -// godo.ForEachAppSpecComponent(spec, func(component *godo.AppStaticSiteSpec) error { ... }) -func ForEachAppSpecComponent[T any](s *AppSpec, fn func(component T) error) error { - return s.ForEachAppComponentSpec(func(component AppComponentSpec) error { - if c, ok := component.(T); ok { - if err := fn(c); err != nil { - return err - } - } - return nil - }) -} - -// GetAppSpecComponent returns an app spec component by type and name. 
-// -// Examples: -// - interface constraint -// godo.GetAppSpecComponent[godo.AppBuildableComponentSpec](spec, "component-name") -// - struct type constraint -// godo.GetAppSpecComponent[*godo.AppServiceSpec](spec, "component-name") -func GetAppSpecComponent[T interface { - GetName() string -}](s *AppSpec, name string) (T, error) { - var c T - errStop := errors.New("stop") - err := ForEachAppSpecComponent(s, func(component T) error { - if component.GetName() == name { - c = component - return errStop - } - return nil - }) - if err == errStop { - return c, nil - } - return c, fmt.Errorf("component %s not found", name) -} diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go deleted file mode 100644 index 05c5ac4..0000000 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ /dev/null @@ -1,3662 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by gen-accessors; DO NOT EDIT. -// Instead, please run "go generate ./..." as described here: -// https://github.com/google/go-github/blob/master/CONTRIBUTING.md#submitting-a-patch - -package godo - -import ( - "time" -) - -// GetActiveDeployment returns the ActiveDeployment field. -func (a *App) GetActiveDeployment() *Deployment { - if a == nil { - return nil - } - return a.ActiveDeployment -} - -// GetBuildConfig returns the BuildConfig field. -func (a *App) GetBuildConfig() *AppBuildConfig { - if a == nil { - return nil - } - return a.BuildConfig -} - -// GetCreatedAt returns the CreatedAt field. -func (a *App) GetCreatedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.CreatedAt -} - -// GetDedicatedIps returns the DedicatedIps field. 
-func (a *App) GetDedicatedIps() []*AppDedicatedIp { - if a == nil { - return nil - } - return a.DedicatedIps -} - -// GetDefaultIngress returns the DefaultIngress field. -func (a *App) GetDefaultIngress() string { - if a == nil { - return "" - } - return a.DefaultIngress -} - -// GetDomains returns the Domains field. -func (a *App) GetDomains() []*AppDomain { - if a == nil { - return nil - } - return a.Domains -} - -// GetID returns the ID field. -func (a *App) GetID() string { - if a == nil { - return "" - } - return a.ID -} - -// GetInProgressDeployment returns the InProgressDeployment field. -func (a *App) GetInProgressDeployment() *Deployment { - if a == nil { - return nil - } - return a.InProgressDeployment -} - -// GetLastDeploymentActiveAt returns the LastDeploymentActiveAt field. -func (a *App) GetLastDeploymentActiveAt() time.Time { - if a == nil { - return time.Time{} - } - return a.LastDeploymentActiveAt -} - -// GetLastDeploymentCreatedAt returns the LastDeploymentCreatedAt field. -func (a *App) GetLastDeploymentCreatedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.LastDeploymentCreatedAt -} - -// GetLiveDomain returns the LiveDomain field. -func (a *App) GetLiveDomain() string { - if a == nil { - return "" - } - return a.LiveDomain -} - -// GetLiveURL returns the LiveURL field. -func (a *App) GetLiveURL() string { - if a == nil { - return "" - } - return a.LiveURL -} - -// GetLiveURLBase returns the LiveURLBase field. -func (a *App) GetLiveURLBase() string { - if a == nil { - return "" - } - return a.LiveURLBase -} - -// GetOwnerUUID returns the OwnerUUID field. -func (a *App) GetOwnerUUID() string { - if a == nil { - return "" - } - return a.OwnerUUID -} - -// GetPendingDeployment returns the PendingDeployment field. -func (a *App) GetPendingDeployment() *Deployment { - if a == nil { - return nil - } - return a.PendingDeployment -} - -// GetPinnedDeployment returns the PinnedDeployment field. 
-func (a *App) GetPinnedDeployment() *Deployment { - if a == nil { - return nil - } - return a.PinnedDeployment -} - -// GetProjectID returns the ProjectID field. -func (a *App) GetProjectID() string { - if a == nil { - return "" - } - return a.ProjectID -} - -// GetRegion returns the Region field. -func (a *App) GetRegion() *AppRegion { - if a == nil { - return nil - } - return a.Region -} - -// GetSpec returns the Spec field. -func (a *App) GetSpec() *AppSpec { - if a == nil { - return nil - } - return a.Spec -} - -// GetTierSlug returns the TierSlug field. -func (a *App) GetTierSlug() string { - if a == nil { - return "" - } - return a.TierSlug -} - -// GetUpdatedAt returns the UpdatedAt field. -func (a *App) GetUpdatedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.UpdatedAt -} - -// GetComponentName returns the ComponentName field. -func (a *AppAlert) GetComponentName() string { - if a == nil { - return "" - } - return a.ComponentName -} - -// GetEmails returns the Emails field. -func (a *AppAlert) GetEmails() []string { - if a == nil { - return nil - } - return a.Emails -} - -// GetID returns the ID field. -func (a *AppAlert) GetID() string { - if a == nil { - return "" - } - return a.ID -} - -// GetPhase returns the Phase field. -func (a *AppAlert) GetPhase() AppAlertPhase { - if a == nil { - return "" - } - return a.Phase -} - -// GetProgress returns the Progress field. -func (a *AppAlert) GetProgress() *AppAlertProgress { - if a == nil { - return nil - } - return a.Progress -} - -// GetSlackWebhooks returns the SlackWebhooks field. -func (a *AppAlert) GetSlackWebhooks() []*AppAlertSlackWebhook { - if a == nil { - return nil - } - return a.SlackWebhooks -} - -// GetSpec returns the Spec field. -func (a *AppAlert) GetSpec() *AppAlertSpec { - if a == nil { - return nil - } - return a.Spec -} - -// GetSteps returns the Steps field. 
-func (a *AppAlertProgress) GetSteps() []*AppAlertProgressStep { - if a == nil { - return nil - } - return a.Steps -} - -// GetEndedAt returns the EndedAt field. -func (a *AppAlertProgressStep) GetEndedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.EndedAt -} - -// GetName returns the Name field. -func (a *AppAlertProgressStep) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetReason returns the Reason field. -func (a *AppAlertProgressStep) GetReason() *AppAlertProgressStepReason { - if a == nil { - return nil - } - return a.Reason -} - -// GetStartedAt returns the StartedAt field. -func (a *AppAlertProgressStep) GetStartedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.StartedAt -} - -// GetStatus returns the Status field. -func (a *AppAlertProgressStep) GetStatus() AppAlertProgressStepStatus { - if a == nil { - return "" - } - return a.Status -} - -// GetSteps returns the Steps field. -func (a *AppAlertProgressStep) GetSteps() []*AppAlertProgressStep { - if a == nil { - return nil - } - return a.Steps -} - -// GetCode returns the Code field. -func (a *AppAlertProgressStepReason) GetCode() string { - if a == nil { - return "" - } - return a.Code -} - -// GetMessage returns the Message field. -func (a *AppAlertProgressStepReason) GetMessage() string { - if a == nil { - return "" - } - return a.Message -} - -// GetChannel returns the Channel field. -func (a *AppAlertSlackWebhook) GetChannel() string { - if a == nil { - return "" - } - return a.Channel -} - -// GetURL returns the URL field. -func (a *AppAlertSlackWebhook) GetURL() string { - if a == nil { - return "" - } - return a.URL -} - -// GetDisabled returns the Disabled field. -func (a *AppAlertSpec) GetDisabled() bool { - if a == nil { - return false - } - return a.Disabled -} - -// GetOperator returns the Operator field. 
-func (a *AppAlertSpec) GetOperator() AppAlertSpecOperator { - if a == nil { - return "" - } - return a.Operator -} - -// GetRule returns the Rule field. -func (a *AppAlertSpec) GetRule() AppAlertSpecRule { - if a == nil { - return "" - } - return a.Rule -} - -// GetValue returns the Value field. -func (a *AppAlertSpec) GetValue() float32 { - if a == nil { - return 0 - } - return a.Value -} - -// GetWindow returns the Window field. -func (a *AppAlertSpec) GetWindow() AppAlertSpecWindow { - if a == nil { - return "" - } - return a.Window -} - -// GetMaxInstanceCount returns the MaxInstanceCount field. -func (a *AppAutoscalingSpec) GetMaxInstanceCount() int64 { - if a == nil { - return 0 - } - return a.MaxInstanceCount -} - -// GetMetrics returns the Metrics field. -func (a *AppAutoscalingSpec) GetMetrics() *AppAutoscalingSpecMetrics { - if a == nil { - return nil - } - return a.Metrics -} - -// GetMinInstanceCount returns the MinInstanceCount field. -func (a *AppAutoscalingSpec) GetMinInstanceCount() int64 { - if a == nil { - return 0 - } - return a.MinInstanceCount -} - -// GetPercent returns the Percent field. -func (a *AppAutoscalingSpecMetricCPU) GetPercent() int64 { - if a == nil { - return 0 - } - return a.Percent -} - -// GetCPU returns the CPU field. -func (a *AppAutoscalingSpecMetrics) GetCPU() *AppAutoscalingSpecMetricCPU { - if a == nil { - return nil - } - return a.CPU -} - -// GetCNBVersioning returns the CNBVersioning field. -func (a *AppBuildConfig) GetCNBVersioning() *AppBuildConfigCNBVersioning { - if a == nil { - return nil - } - return a.CNBVersioning -} - -// GetBuildpacks returns the Buildpacks field. -func (a *AppBuildConfigCNBVersioning) GetBuildpacks() []*Buildpack { - if a == nil { - return nil - } - return a.Buildpacks -} - -// GetStackID returns the StackID field. -func (a *AppBuildConfigCNBVersioning) GetStackID() string { - if a == nil { - return "" - } - return a.StackID -} - -// GetAllowCredentials returns the AllowCredentials field. 
-func (a *AppCORSPolicy) GetAllowCredentials() bool { - if a == nil { - return false - } - return a.AllowCredentials -} - -// GetAllowHeaders returns the AllowHeaders field. -func (a *AppCORSPolicy) GetAllowHeaders() []string { - if a == nil { - return nil - } - return a.AllowHeaders -} - -// GetAllowMethods returns the AllowMethods field. -func (a *AppCORSPolicy) GetAllowMethods() []string { - if a == nil { - return nil - } - return a.AllowMethods -} - -// GetAllowOrigins returns the AllowOrigins field. -func (a *AppCORSPolicy) GetAllowOrigins() []*AppStringMatch { - if a == nil { - return nil - } - return a.AllowOrigins -} - -// GetExposeHeaders returns the ExposeHeaders field. -func (a *AppCORSPolicy) GetExposeHeaders() []string { - if a == nil { - return nil - } - return a.ExposeHeaders -} - -// GetMaxAge returns the MaxAge field. -func (a *AppCORSPolicy) GetMaxAge() string { - if a == nil { - return "" - } - return a.MaxAge -} - -// GetProjectID returns the ProjectID field. -func (a *AppCreateRequest) GetProjectID() string { - if a == nil { - return "" - } - return a.ProjectID -} - -// GetSpec returns the Spec field. -func (a *AppCreateRequest) GetSpec() *AppSpec { - if a == nil { - return nil - } - return a.Spec -} - -// GetClusterName returns the ClusterName field. -func (a *AppDatabaseSpec) GetClusterName() string { - if a == nil { - return "" - } - return a.ClusterName -} - -// GetDBName returns the DBName field. -func (a *AppDatabaseSpec) GetDBName() string { - if a == nil { - return "" - } - return a.DBName -} - -// GetDBUser returns the DBUser field. -func (a *AppDatabaseSpec) GetDBUser() string { - if a == nil { - return "" - } - return a.DBUser -} - -// GetEngine returns the Engine field. -func (a *AppDatabaseSpec) GetEngine() AppDatabaseSpecEngine { - if a == nil { - return "" - } - return a.Engine -} - -// GetName returns the Name field. 
-func (a *AppDatabaseSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetNumNodes returns the NumNodes field. -func (a *AppDatabaseSpec) GetNumNodes() int64 { - if a == nil { - return 0 - } - return a.NumNodes -} - -// GetProduction returns the Production field. -func (a *AppDatabaseSpec) GetProduction() bool { - if a == nil { - return false - } - return a.Production -} - -// GetSize returns the Size field. -func (a *AppDatabaseSpec) GetSize() string { - if a == nil { - return "" - } - return a.Size -} - -// GetVersion returns the Version field. -func (a *AppDatabaseSpec) GetVersion() string { - if a == nil { - return "" - } - return a.Version -} - -// GetID returns the ID field. -func (a *AppDedicatedIp) GetID() string { - if a == nil { - return "" - } - return a.ID -} - -// GetIp returns the Ip field. -func (a *AppDedicatedIp) GetIp() string { - if a == nil { - return "" - } - return a.Ip -} - -// GetStatus returns the Status field. -func (a *AppDedicatedIp) GetStatus() AppDedicatedIpStatus { - if a == nil { - return "" - } - return a.Status -} - -// GetCertificateExpiresAt returns the CertificateExpiresAt field. -func (a *AppDomain) GetCertificateExpiresAt() time.Time { - if a == nil { - return time.Time{} - } - return a.CertificateExpiresAt -} - -// GetID returns the ID field. -func (a *AppDomain) GetID() string { - if a == nil { - return "" - } - return a.ID -} - -// GetPhase returns the Phase field. -func (a *AppDomain) GetPhase() AppDomainPhase { - if a == nil { - return "" - } - return a.Phase -} - -// GetProgress returns the Progress field. -func (a *AppDomain) GetProgress() *AppDomainProgress { - if a == nil { - return nil - } - return a.Progress -} - -// GetRotateValidationRecords returns the RotateValidationRecords field. -func (a *AppDomain) GetRotateValidationRecords() bool { - if a == nil { - return false - } - return a.RotateValidationRecords -} - -// GetSpec returns the Spec field. 
-func (a *AppDomain) GetSpec() *AppDomainSpec { - if a == nil { - return nil - } - return a.Spec -} - -// GetValidation returns the Validation field. -func (a *AppDomain) GetValidation() *AppDomainValidation { - if a == nil { - return nil - } - return a.Validation -} - -// GetValidations returns the Validations field. -func (a *AppDomain) GetValidations() []*AppDomainValidation { - if a == nil { - return nil - } - return a.Validations -} - -// GetSteps returns the Steps field. -func (a *AppDomainProgress) GetSteps() []*AppDomainProgressStep { - if a == nil { - return nil - } - return a.Steps -} - -// GetEndedAt returns the EndedAt field. -func (a *AppDomainProgressStep) GetEndedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.EndedAt -} - -// GetName returns the Name field. -func (a *AppDomainProgressStep) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetReason returns the Reason field. -func (a *AppDomainProgressStep) GetReason() *AppDomainProgressStepReason { - if a == nil { - return nil - } - return a.Reason -} - -// GetStartedAt returns the StartedAt field. -func (a *AppDomainProgressStep) GetStartedAt() time.Time { - if a == nil { - return time.Time{} - } - return a.StartedAt -} - -// GetStatus returns the Status field. -func (a *AppDomainProgressStep) GetStatus() AppDomainProgressStepStatus { - if a == nil { - return "" - } - return a.Status -} - -// GetSteps returns the Steps field. -func (a *AppDomainProgressStep) GetSteps() []*AppDomainProgressStep { - if a == nil { - return nil - } - return a.Steps -} - -// GetCode returns the Code field. -func (a *AppDomainProgressStepReason) GetCode() string { - if a == nil { - return "" - } - return a.Code -} - -// GetMessage returns the Message field. -func (a *AppDomainProgressStepReason) GetMessage() string { - if a == nil { - return "" - } - return a.Message -} - -// GetCertificate returns the Certificate field. 
-func (a *AppDomainSpec) GetCertificate() string { - if a == nil { - return "" - } - return a.Certificate -} - -// GetDomain returns the Domain field. -func (a *AppDomainSpec) GetDomain() string { - if a == nil { - return "" - } - return a.Domain -} - -// GetMinimumTLSVersion returns the MinimumTLSVersion field. -func (a *AppDomainSpec) GetMinimumTLSVersion() string { - if a == nil { - return "" - } - return a.MinimumTLSVersion -} - -// GetType returns the Type field. -func (a *AppDomainSpec) GetType() AppDomainSpecType { - if a == nil { - return "" - } - return a.Type -} - -// GetWildcard returns the Wildcard field. -func (a *AppDomainSpec) GetWildcard() bool { - if a == nil { - return false - } - return a.Wildcard -} - -// GetZone returns the Zone field. -func (a *AppDomainSpec) GetZone() string { - if a == nil { - return "" - } - return a.Zone -} - -// GetTXTName returns the TXTName field. -func (a *AppDomainValidation) GetTXTName() string { - if a == nil { - return "" - } - return a.TXTName -} - -// GetTXTValue returns the TXTValue field. -func (a *AppDomainValidation) GetTXTValue() string { - if a == nil { - return "" - } - return a.TXTValue -} - -// GetType returns the Type field. -func (a *AppEgressSpec) GetType() AppEgressSpecType { - if a == nil { - return "" - } - return a.Type -} - -// GetAlerts returns the Alerts field. -func (a *AppFunctionsSpec) GetAlerts() []*AppAlertSpec { - if a == nil { - return nil - } - return a.Alerts -} - -// GetCORS returns the CORS field. -func (a *AppFunctionsSpec) GetCORS() *AppCORSPolicy { - if a == nil { - return nil - } - return a.CORS -} - -// GetEnvs returns the Envs field. -func (a *AppFunctionsSpec) GetEnvs() []*AppVariableDefinition { - if a == nil { - return nil - } - return a.Envs -} - -// GetGit returns the Git field. -func (a *AppFunctionsSpec) GetGit() *GitSourceSpec { - if a == nil { - return nil - } - return a.Git -} - -// GetGitHub returns the GitHub field. 
-func (a *AppFunctionsSpec) GetGitHub() *GitHubSourceSpec { - if a == nil { - return nil - } - return a.GitHub -} - -// GetGitLab returns the GitLab field. -func (a *AppFunctionsSpec) GetGitLab() *GitLabSourceSpec { - if a == nil { - return nil - } - return a.GitLab -} - -// GetLogDestinations returns the LogDestinations field. -func (a *AppFunctionsSpec) GetLogDestinations() []*AppLogDestinationSpec { - if a == nil { - return nil - } - return a.LogDestinations -} - -// GetName returns the Name field. -func (a *AppFunctionsSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetRoutes returns the Routes field. -func (a *AppFunctionsSpec) GetRoutes() []*AppRouteSpec { - if a == nil { - return nil - } - return a.Routes -} - -// GetSourceDir returns the SourceDir field. -func (a *AppFunctionsSpec) GetSourceDir() string { - if a == nil { - return "" - } - return a.SourceDir -} - -// GetLoadBalancer returns the LoadBalancer field. -func (a *AppIngressSpec) GetLoadBalancer() AppIngressSpecLoadBalancer { - if a == nil { - return "" - } - return a.LoadBalancer -} - -// GetLoadBalancerSize returns the LoadBalancerSize field. -func (a *AppIngressSpec) GetLoadBalancerSize() int64 { - if a == nil { - return 0 - } - return a.LoadBalancerSize -} - -// GetRules returns the Rules field. -func (a *AppIngressSpec) GetRules() []*AppIngressSpecRule { - if a == nil { - return nil - } - return a.Rules -} - -// GetComponent returns the Component field. -func (a *AppIngressSpecRule) GetComponent() *AppIngressSpecRuleRoutingComponent { - if a == nil { - return nil - } - return a.Component -} - -// GetCORS returns the CORS field. -func (a *AppIngressSpecRule) GetCORS() *AppCORSPolicy { - if a == nil { - return nil - } - return a.CORS -} - -// GetMatch returns the Match field. -func (a *AppIngressSpecRule) GetMatch() *AppIngressSpecRuleMatch { - if a == nil { - return nil - } - return a.Match -} - -// GetRedirect returns the Redirect field. 
-func (a *AppIngressSpecRule) GetRedirect() *AppIngressSpecRuleRoutingRedirect { - if a == nil { - return nil - } - return a.Redirect -} - -// GetPath returns the Path field. -func (a *AppIngressSpecRuleMatch) GetPath() *AppIngressSpecRuleStringMatch { - if a == nil { - return nil - } - return a.Path -} - -// GetName returns the Name field. -func (a *AppIngressSpecRuleRoutingComponent) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetPreservePathPrefix returns the PreservePathPrefix field. -func (a *AppIngressSpecRuleRoutingComponent) GetPreservePathPrefix() bool { - if a == nil { - return false - } - return a.PreservePathPrefix -} - -// GetRewrite returns the Rewrite field. -func (a *AppIngressSpecRuleRoutingComponent) GetRewrite() string { - if a == nil { - return "" - } - return a.Rewrite -} - -// GetAuthority returns the Authority field. -func (a *AppIngressSpecRuleRoutingRedirect) GetAuthority() string { - if a == nil { - return "" - } - return a.Authority -} - -// GetPort returns the Port field. -func (a *AppIngressSpecRuleRoutingRedirect) GetPort() int64 { - if a == nil { - return 0 - } - return a.Port -} - -// GetRedirectCode returns the RedirectCode field. -func (a *AppIngressSpecRuleRoutingRedirect) GetRedirectCode() int64 { - if a == nil { - return 0 - } - return a.RedirectCode -} - -// GetScheme returns the Scheme field. -func (a *AppIngressSpecRuleRoutingRedirect) GetScheme() string { - if a == nil { - return "" - } - return a.Scheme -} - -// GetUri returns the Uri field. -func (a *AppIngressSpecRuleRoutingRedirect) GetUri() string { - if a == nil { - return "" - } - return a.Uri -} - -// GetPrefix returns the Prefix field. -func (a *AppIngressSpecRuleStringMatch) GetPrefix() string { - if a == nil { - return "" - } - return a.Prefix -} - -// GetBandwidthAllowanceGib returns the BandwidthAllowanceGib field. 
-func (a *AppInstanceSize) GetBandwidthAllowanceGib() string { - if a == nil { - return "" - } - return a.BandwidthAllowanceGib -} - -// GetCPUs returns the CPUs field. -func (a *AppInstanceSize) GetCPUs() string { - if a == nil { - return "" - } - return a.CPUs -} - -// GetCPUType returns the CPUType field. -func (a *AppInstanceSize) GetCPUType() AppInstanceSizeCPUType { - if a == nil { - return "" - } - return a.CPUType -} - -// GetDeprecationIntent returns the DeprecationIntent field. -func (a *AppInstanceSize) GetDeprecationIntent() bool { - if a == nil { - return false - } - return a.DeprecationIntent -} - -// GetFeaturePreview returns the FeaturePreview field. -func (a *AppInstanceSize) GetFeaturePreview() bool { - if a == nil { - return false - } - return a.FeaturePreview -} - -// GetMemoryBytes returns the MemoryBytes field. -func (a *AppInstanceSize) GetMemoryBytes() string { - if a == nil { - return "" - } - return a.MemoryBytes -} - -// GetName returns the Name field. -func (a *AppInstanceSize) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetScalable returns the Scalable field. -func (a *AppInstanceSize) GetScalable() bool { - if a == nil { - return false - } - return a.Scalable -} - -// GetSingleInstanceOnly returns the SingleInstanceOnly field. -func (a *AppInstanceSize) GetSingleInstanceOnly() bool { - if a == nil { - return false - } - return a.SingleInstanceOnly -} - -// GetSlug returns the Slug field. -func (a *AppInstanceSize) GetSlug() string { - if a == nil { - return "" - } - return a.Slug -} - -// GetTierDowngradeTo returns the TierDowngradeTo field. -func (a *AppInstanceSize) GetTierDowngradeTo() string { - if a == nil { - return "" - } - return a.TierDowngradeTo -} - -// GetTierSlug returns the TierSlug field. -func (a *AppInstanceSize) GetTierSlug() string { - if a == nil { - return "" - } - return a.TierSlug -} - -// GetTierUpgradeTo returns the TierUpgradeTo field. 
-func (a *AppInstanceSize) GetTierUpgradeTo() string { - if a == nil { - return "" - } - return a.TierUpgradeTo -} - -// GetUSDPerMonth returns the USDPerMonth field. -func (a *AppInstanceSize) GetUSDPerMonth() string { - if a == nil { - return "" - } - return a.USDPerMonth -} - -// GetUSDPerSecond returns the USDPerSecond field. -func (a *AppInstanceSize) GetUSDPerSecond() string { - if a == nil { - return "" - } - return a.USDPerSecond -} - -// GetAlerts returns the Alerts field. -func (a *AppJobSpec) GetAlerts() []*AppAlertSpec { - if a == nil { - return nil - } - return a.Alerts -} - -// GetBuildCommand returns the BuildCommand field. -func (a *AppJobSpec) GetBuildCommand() string { - if a == nil { - return "" - } - return a.BuildCommand -} - -// GetDockerfilePath returns the DockerfilePath field. -func (a *AppJobSpec) GetDockerfilePath() string { - if a == nil { - return "" - } - return a.DockerfilePath -} - -// GetEnvironmentSlug returns the EnvironmentSlug field. -func (a *AppJobSpec) GetEnvironmentSlug() string { - if a == nil { - return "" - } - return a.EnvironmentSlug -} - -// GetEnvs returns the Envs field. -func (a *AppJobSpec) GetEnvs() []*AppVariableDefinition { - if a == nil { - return nil - } - return a.Envs -} - -// GetGit returns the Git field. -func (a *AppJobSpec) GetGit() *GitSourceSpec { - if a == nil { - return nil - } - return a.Git -} - -// GetGitHub returns the GitHub field. -func (a *AppJobSpec) GetGitHub() *GitHubSourceSpec { - if a == nil { - return nil - } - return a.GitHub -} - -// GetGitLab returns the GitLab field. -func (a *AppJobSpec) GetGitLab() *GitLabSourceSpec { - if a == nil { - return nil - } - return a.GitLab -} - -// GetImage returns the Image field. -func (a *AppJobSpec) GetImage() *ImageSourceSpec { - if a == nil { - return nil - } - return a.Image -} - -// GetInstanceCount returns the InstanceCount field. 
-func (a *AppJobSpec) GetInstanceCount() int64 { - if a == nil { - return 0 - } - return a.InstanceCount -} - -// GetInstanceSizeSlug returns the InstanceSizeSlug field. -func (a *AppJobSpec) GetInstanceSizeSlug() string { - if a == nil { - return "" - } - return a.InstanceSizeSlug -} - -// GetKind returns the Kind field. -func (a *AppJobSpec) GetKind() AppJobSpecKind { - if a == nil { - return "" - } - return a.Kind -} - -// GetLogDestinations returns the LogDestinations field. -func (a *AppJobSpec) GetLogDestinations() []*AppLogDestinationSpec { - if a == nil { - return nil - } - return a.LogDestinations -} - -// GetName returns the Name field. -func (a *AppJobSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetRunCommand returns the RunCommand field. -func (a *AppJobSpec) GetRunCommand() string { - if a == nil { - return "" - } - return a.RunCommand -} - -// GetSourceDir returns the SourceDir field. -func (a *AppJobSpec) GetSourceDir() string { - if a == nil { - return "" - } - return a.SourceDir -} - -// GetTermination returns the Termination field. -func (a *AppJobSpec) GetTermination() *AppJobSpecTermination { - if a == nil { - return nil - } - return a.Termination -} - -// GetGracePeriodSeconds returns the GracePeriodSeconds field. -func (a *AppJobSpecTermination) GetGracePeriodSeconds() int32 { - if a == nil { - return 0 - } - return a.GracePeriodSeconds -} - -// GetDatadog returns the Datadog field. -func (a *AppLogDestinationSpec) GetDatadog() *AppLogDestinationSpecDataDog { - if a == nil { - return nil - } - return a.Datadog -} - -// GetEndpoint returns the Endpoint field. -func (a *AppLogDestinationSpec) GetEndpoint() string { - if a == nil { - return "" - } - return a.Endpoint -} - -// GetHeaders returns the Headers field. -func (a *AppLogDestinationSpec) GetHeaders() []*AppLogDestinationSpecHeader { - if a == nil { - return nil - } - return a.Headers -} - -// GetLogtail returns the Logtail field. 
-func (a *AppLogDestinationSpec) GetLogtail() *AppLogDestinationSpecLogtail { - if a == nil { - return nil - } - return a.Logtail -} - -// GetName returns the Name field. -func (a *AppLogDestinationSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetOpenSearch returns the OpenSearch field. -func (a *AppLogDestinationSpec) GetOpenSearch() *AppLogDestinationSpecOpenSearch { - if a == nil { - return nil - } - return a.OpenSearch -} - -// GetPapertrail returns the Papertrail field. -func (a *AppLogDestinationSpec) GetPapertrail() *AppLogDestinationSpecPapertrail { - if a == nil { - return nil - } - return a.Papertrail -} - -// GetTLSInsecure returns the TLSInsecure field. -func (a *AppLogDestinationSpec) GetTLSInsecure() bool { - if a == nil { - return false - } - return a.TLSInsecure -} - -// GetApiKey returns the ApiKey field. -func (a *AppLogDestinationSpecDataDog) GetApiKey() string { - if a == nil { - return "" - } - return a.ApiKey -} - -// GetEndpoint returns the Endpoint field. -func (a *AppLogDestinationSpecDataDog) GetEndpoint() string { - if a == nil { - return "" - } - return a.Endpoint -} - -// GetKey returns the Key field. -func (a *AppLogDestinationSpecHeader) GetKey() string { - if a == nil { - return "" - } - return a.Key -} - -// GetValue returns the Value field. -func (a *AppLogDestinationSpecHeader) GetValue() string { - if a == nil { - return "" - } - return a.Value -} - -// GetToken returns the Token field. -func (a *AppLogDestinationSpecLogtail) GetToken() string { - if a == nil { - return "" - } - return a.Token -} - -// GetBasicAuth returns the BasicAuth field. -func (a *AppLogDestinationSpecOpenSearch) GetBasicAuth() *OpenSearchBasicAuth { - if a == nil { - return nil - } - return a.BasicAuth -} - -// GetClusterName returns the ClusterName field. 
-func (a *AppLogDestinationSpecOpenSearch) GetClusterName() string { - if a == nil { - return "" - } - return a.ClusterName -} - -// GetEndpoint returns the Endpoint field. -func (a *AppLogDestinationSpecOpenSearch) GetEndpoint() string { - if a == nil { - return "" - } - return a.Endpoint -} - -// GetIndexName returns the IndexName field. -func (a *AppLogDestinationSpecOpenSearch) GetIndexName() string { - if a == nil { - return "" - } - return a.IndexName -} - -// GetEndpoint returns the Endpoint field. -func (a *AppLogDestinationSpecPapertrail) GetEndpoint() string { - if a == nil { - return "" - } - return a.Endpoint -} - -// GetAppID returns the AppID field. -func (a *AppProposeRequest) GetAppID() string { - if a == nil { - return "" - } - return a.AppID -} - -// GetSpec returns the Spec field. -func (a *AppProposeRequest) GetSpec() *AppSpec { - if a == nil { - return nil - } - return a.Spec -} - -// GetAppCost returns the AppCost field. -func (a *AppProposeResponse) GetAppCost() float32 { - if a == nil { - return 0 - } - return a.AppCost -} - -// GetAppIsStarter returns the AppIsStarter field. -func (a *AppProposeResponse) GetAppIsStarter() bool { - if a == nil { - return false - } - return a.AppIsStarter -} - -// GetAppIsStatic returns the AppIsStatic field. -func (a *AppProposeResponse) GetAppIsStatic() bool { - if a == nil { - return false - } - return a.AppIsStatic -} - -// GetAppNameAvailable returns the AppNameAvailable field. -func (a *AppProposeResponse) GetAppNameAvailable() bool { - if a == nil { - return false - } - return a.AppNameAvailable -} - -// GetAppNameSuggestion returns the AppNameSuggestion field. -func (a *AppProposeResponse) GetAppNameSuggestion() string { - if a == nil { - return "" - } - return a.AppNameSuggestion -} - -// GetAppTierDowngradeCost returns the AppTierDowngradeCost field. 
-func (a *AppProposeResponse) GetAppTierDowngradeCost() float32 { - if a == nil { - return 0 - } - return a.AppTierDowngradeCost -} - -// GetAppTierUpgradeCost returns the AppTierUpgradeCost field. -func (a *AppProposeResponse) GetAppTierUpgradeCost() float32 { - if a == nil { - return 0 - } - return a.AppTierUpgradeCost -} - -// GetExistingStarterApps returns the ExistingStarterApps field. -func (a *AppProposeResponse) GetExistingStarterApps() string { - if a == nil { - return "" - } - return a.ExistingStarterApps -} - -// GetExistingStaticApps returns the ExistingStaticApps field. -func (a *AppProposeResponse) GetExistingStaticApps() string { - if a == nil { - return "" - } - return a.ExistingStaticApps -} - -// GetMaxFreeStarterApps returns the MaxFreeStarterApps field. -func (a *AppProposeResponse) GetMaxFreeStarterApps() string { - if a == nil { - return "" - } - return a.MaxFreeStarterApps -} - -// GetMaxFreeStaticApps returns the MaxFreeStaticApps field. -func (a *AppProposeResponse) GetMaxFreeStaticApps() string { - if a == nil { - return "" - } - return a.MaxFreeStaticApps -} - -// GetSpec returns the Spec field. -func (a *AppProposeResponse) GetSpec() *AppSpec { - if a == nil { - return nil - } - return a.Spec -} - -// GetContinent returns the Continent field. -func (a *AppRegion) GetContinent() string { - if a == nil { - return "" - } - return a.Continent -} - -// GetDataCenters returns the DataCenters field. -func (a *AppRegion) GetDataCenters() []string { - if a == nil { - return nil - } - return a.DataCenters -} - -// GetDefault returns the Default field. -func (a *AppRegion) GetDefault() bool { - if a == nil { - return false - } - return a.Default -} - -// GetDisabled returns the Disabled field. -func (a *AppRegion) GetDisabled() bool { - if a == nil { - return false - } - return a.Disabled -} - -// GetFlag returns the Flag field. 
-func (a *AppRegion) GetFlag() string { - if a == nil { - return "" - } - return a.Flag -} - -// GetLabel returns the Label field. -func (a *AppRegion) GetLabel() string { - if a == nil { - return "" - } - return a.Label -} - -// GetReason returns the Reason field. -func (a *AppRegion) GetReason() string { - if a == nil { - return "" - } - return a.Reason -} - -// GetSlug returns the Slug field. -func (a *AppRegion) GetSlug() string { - if a == nil { - return "" - } - return a.Slug -} - -// GetPath returns the Path field. -func (a *AppRouteSpec) GetPath() string { - if a == nil { - return "" - } - return a.Path -} - -// GetPreservePathPrefix returns the PreservePathPrefix field. -func (a *AppRouteSpec) GetPreservePathPrefix() bool { - if a == nil { - return false - } - return a.PreservePathPrefix -} - -// GetAlerts returns the Alerts field. -func (a *AppServiceSpec) GetAlerts() []*AppAlertSpec { - if a == nil { - return nil - } - return a.Alerts -} - -// GetAutoscaling returns the Autoscaling field. -func (a *AppServiceSpec) GetAutoscaling() *AppAutoscalingSpec { - if a == nil { - return nil - } - return a.Autoscaling -} - -// GetBuildCommand returns the BuildCommand field. -func (a *AppServiceSpec) GetBuildCommand() string { - if a == nil { - return "" - } - return a.BuildCommand -} - -// GetCORS returns the CORS field. -func (a *AppServiceSpec) GetCORS() *AppCORSPolicy { - if a == nil { - return nil - } - return a.CORS -} - -// GetDockerfilePath returns the DockerfilePath field. -func (a *AppServiceSpec) GetDockerfilePath() string { - if a == nil { - return "" - } - return a.DockerfilePath -} - -// GetEnvironmentSlug returns the EnvironmentSlug field. -func (a *AppServiceSpec) GetEnvironmentSlug() string { - if a == nil { - return "" - } - return a.EnvironmentSlug -} - -// GetEnvs returns the Envs field. -func (a *AppServiceSpec) GetEnvs() []*AppVariableDefinition { - if a == nil { - return nil - } - return a.Envs -} - -// GetGit returns the Git field. 
-func (a *AppServiceSpec) GetGit() *GitSourceSpec { - if a == nil { - return nil - } - return a.Git -} - -// GetGitHub returns the GitHub field. -func (a *AppServiceSpec) GetGitHub() *GitHubSourceSpec { - if a == nil { - return nil - } - return a.GitHub -} - -// GetGitLab returns the GitLab field. -func (a *AppServiceSpec) GetGitLab() *GitLabSourceSpec { - if a == nil { - return nil - } - return a.GitLab -} - -// GetHealthCheck returns the HealthCheck field. -func (a *AppServiceSpec) GetHealthCheck() *AppServiceSpecHealthCheck { - if a == nil { - return nil - } - return a.HealthCheck -} - -// GetHTTPPort returns the HTTPPort field. -func (a *AppServiceSpec) GetHTTPPort() int64 { - if a == nil { - return 0 - } - return a.HTTPPort -} - -// GetImage returns the Image field. -func (a *AppServiceSpec) GetImage() *ImageSourceSpec { - if a == nil { - return nil - } - return a.Image -} - -// GetInstanceCount returns the InstanceCount field. -func (a *AppServiceSpec) GetInstanceCount() int64 { - if a == nil { - return 0 - } - return a.InstanceCount -} - -// GetInstanceSizeSlug returns the InstanceSizeSlug field. -func (a *AppServiceSpec) GetInstanceSizeSlug() string { - if a == nil { - return "" - } - return a.InstanceSizeSlug -} - -// GetInternalPorts returns the InternalPorts field. -func (a *AppServiceSpec) GetInternalPorts() []int64 { - if a == nil { - return nil - } - return a.InternalPorts -} - -// GetLogDestinations returns the LogDestinations field. -func (a *AppServiceSpec) GetLogDestinations() []*AppLogDestinationSpec { - if a == nil { - return nil - } - return a.LogDestinations -} - -// GetName returns the Name field. -func (a *AppServiceSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetRoutes returns the Routes field. -func (a *AppServiceSpec) GetRoutes() []*AppRouteSpec { - if a == nil { - return nil - } - return a.Routes -} - -// GetRunCommand returns the RunCommand field. 
-func (a *AppServiceSpec) GetRunCommand() string { - if a == nil { - return "" - } - return a.RunCommand -} - -// GetSourceDir returns the SourceDir field. -func (a *AppServiceSpec) GetSourceDir() string { - if a == nil { - return "" - } - return a.SourceDir -} - -// GetTermination returns the Termination field. -func (a *AppServiceSpec) GetTermination() *AppServiceSpecTermination { - if a == nil { - return nil - } - return a.Termination -} - -// GetFailureThreshold returns the FailureThreshold field. -func (a *AppServiceSpecHealthCheck) GetFailureThreshold() int32 { - if a == nil { - return 0 - } - return a.FailureThreshold -} - -// GetHTTPPath returns the HTTPPath field. -func (a *AppServiceSpecHealthCheck) GetHTTPPath() string { - if a == nil { - return "" - } - return a.HTTPPath -} - -// GetInitialDelaySeconds returns the InitialDelaySeconds field. -func (a *AppServiceSpecHealthCheck) GetInitialDelaySeconds() int32 { - if a == nil { - return 0 - } - return a.InitialDelaySeconds -} - -// GetPath returns the Path field. -func (a *AppServiceSpecHealthCheck) GetPath() string { - if a == nil { - return "" - } - return a.Path -} - -// GetPeriodSeconds returns the PeriodSeconds field. -func (a *AppServiceSpecHealthCheck) GetPeriodSeconds() int32 { - if a == nil { - return 0 - } - return a.PeriodSeconds -} - -// GetPort returns the Port field. -func (a *AppServiceSpecHealthCheck) GetPort() int64 { - if a == nil { - return 0 - } - return a.Port -} - -// GetSuccessThreshold returns the SuccessThreshold field. -func (a *AppServiceSpecHealthCheck) GetSuccessThreshold() int32 { - if a == nil { - return 0 - } - return a.SuccessThreshold -} - -// GetTimeoutSeconds returns the TimeoutSeconds field. -func (a *AppServiceSpecHealthCheck) GetTimeoutSeconds() int32 { - if a == nil { - return 0 - } - return a.TimeoutSeconds -} - -// GetDrainSeconds returns the DrainSeconds field. 
-func (a *AppServiceSpecTermination) GetDrainSeconds() int32 { - if a == nil { - return 0 - } - return a.DrainSeconds -} - -// GetGracePeriodSeconds returns the GracePeriodSeconds field. -func (a *AppServiceSpecTermination) GetGracePeriodSeconds() int32 { - if a == nil { - return 0 - } - return a.GracePeriodSeconds -} - -// GetAlerts returns the Alerts field. -func (a *AppSpec) GetAlerts() []*AppAlertSpec { - if a == nil { - return nil - } - return a.Alerts -} - -// GetDatabases returns the Databases field. -func (a *AppSpec) GetDatabases() []*AppDatabaseSpec { - if a == nil { - return nil - } - return a.Databases -} - -// GetDomains returns the Domains field. -func (a *AppSpec) GetDomains() []*AppDomainSpec { - if a == nil { - return nil - } - return a.Domains -} - -// GetEgress returns the Egress field. -func (a *AppSpec) GetEgress() *AppEgressSpec { - if a == nil { - return nil - } - return a.Egress -} - -// GetEnvs returns the Envs field. -func (a *AppSpec) GetEnvs() []*AppVariableDefinition { - if a == nil { - return nil - } - return a.Envs -} - -// GetFeatures returns the Features field. -func (a *AppSpec) GetFeatures() []string { - if a == nil { - return nil - } - return a.Features -} - -// GetFunctions returns the Functions field. -func (a *AppSpec) GetFunctions() []*AppFunctionsSpec { - if a == nil { - return nil - } - return a.Functions -} - -// GetIngress returns the Ingress field. -func (a *AppSpec) GetIngress() *AppIngressSpec { - if a == nil { - return nil - } - return a.Ingress -} - -// GetJobs returns the Jobs field. -func (a *AppSpec) GetJobs() []*AppJobSpec { - if a == nil { - return nil - } - return a.Jobs -} - -// GetName returns the Name field. -func (a *AppSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetRegion returns the Region field. -func (a *AppSpec) GetRegion() string { - if a == nil { - return "" - } - return a.Region -} - -// GetServices returns the Services field. 
-func (a *AppSpec) GetServices() []*AppServiceSpec { - if a == nil { - return nil - } - return a.Services -} - -// GetStaticSites returns the StaticSites field. -func (a *AppSpec) GetStaticSites() []*AppStaticSiteSpec { - if a == nil { - return nil - } - return a.StaticSites -} - -// GetWorkers returns the Workers field. -func (a *AppSpec) GetWorkers() []*AppWorkerSpec { - if a == nil { - return nil - } - return a.Workers -} - -// GetBuildCommand returns the BuildCommand field. -func (a *AppStaticSiteSpec) GetBuildCommand() string { - if a == nil { - return "" - } - return a.BuildCommand -} - -// GetCatchallDocument returns the CatchallDocument field. -func (a *AppStaticSiteSpec) GetCatchallDocument() string { - if a == nil { - return "" - } - return a.CatchallDocument -} - -// GetCORS returns the CORS field. -func (a *AppStaticSiteSpec) GetCORS() *AppCORSPolicy { - if a == nil { - return nil - } - return a.CORS -} - -// GetDockerfilePath returns the DockerfilePath field. -func (a *AppStaticSiteSpec) GetDockerfilePath() string { - if a == nil { - return "" - } - return a.DockerfilePath -} - -// GetEnvironmentSlug returns the EnvironmentSlug field. -func (a *AppStaticSiteSpec) GetEnvironmentSlug() string { - if a == nil { - return "" - } - return a.EnvironmentSlug -} - -// GetEnvs returns the Envs field. -func (a *AppStaticSiteSpec) GetEnvs() []*AppVariableDefinition { - if a == nil { - return nil - } - return a.Envs -} - -// GetErrorDocument returns the ErrorDocument field. -func (a *AppStaticSiteSpec) GetErrorDocument() string { - if a == nil { - return "" - } - return a.ErrorDocument -} - -// GetGit returns the Git field. -func (a *AppStaticSiteSpec) GetGit() *GitSourceSpec { - if a == nil { - return nil - } - return a.Git -} - -// GetGitHub returns the GitHub field. -func (a *AppStaticSiteSpec) GetGitHub() *GitHubSourceSpec { - if a == nil { - return nil - } - return a.GitHub -} - -// GetGitLab returns the GitLab field. 
-func (a *AppStaticSiteSpec) GetGitLab() *GitLabSourceSpec { - if a == nil { - return nil - } - return a.GitLab -} - -// GetIndexDocument returns the IndexDocument field. -func (a *AppStaticSiteSpec) GetIndexDocument() string { - if a == nil { - return "" - } - return a.IndexDocument -} - -// GetName returns the Name field. -func (a *AppStaticSiteSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetOutputDir returns the OutputDir field. -func (a *AppStaticSiteSpec) GetOutputDir() string { - if a == nil { - return "" - } - return a.OutputDir -} - -// GetRoutes returns the Routes field. -func (a *AppStaticSiteSpec) GetRoutes() []*AppRouteSpec { - if a == nil { - return nil - } - return a.Routes -} - -// GetSourceDir returns the SourceDir field. -func (a *AppStaticSiteSpec) GetSourceDir() string { - if a == nil { - return "" - } - return a.SourceDir -} - -// GetExact returns the Exact field. -func (a *AppStringMatch) GetExact() string { - if a == nil { - return "" - } - return a.Exact -} - -// GetPrefix returns the Prefix field. -func (a *AppStringMatch) GetPrefix() string { - if a == nil { - return "" - } - return a.Prefix -} - -// GetRegex returns the Regex field. -func (a *AppStringMatch) GetRegex() string { - if a == nil { - return "" - } - return a.Regex -} - -// GetBuildSeconds returns the BuildSeconds field. -func (a *AppTier) GetBuildSeconds() string { - if a == nil { - return "" - } - return a.BuildSeconds -} - -// GetEgressBandwidthBytes returns the EgressBandwidthBytes field. -func (a *AppTier) GetEgressBandwidthBytes() string { - if a == nil { - return "" - } - return a.EgressBandwidthBytes -} - -// GetName returns the Name field. -func (a *AppTier) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetSlug returns the Slug field. -func (a *AppTier) GetSlug() string { - if a == nil { - return "" - } - return a.Slug -} - -// GetKey returns the Key field. 
-func (a *AppVariableDefinition) GetKey() string { - if a == nil { - return "" - } - return a.Key -} - -// GetScope returns the Scope field. -func (a *AppVariableDefinition) GetScope() AppVariableScope { - if a == nil { - return "" - } - return a.Scope -} - -// GetType returns the Type field. -func (a *AppVariableDefinition) GetType() AppVariableType { - if a == nil { - return "" - } - return a.Type -} - -// GetValue returns the Value field. -func (a *AppVariableDefinition) GetValue() string { - if a == nil { - return "" - } - return a.Value -} - -// GetAlerts returns the Alerts field. -func (a *AppWorkerSpec) GetAlerts() []*AppAlertSpec { - if a == nil { - return nil - } - return a.Alerts -} - -// GetAutoscaling returns the Autoscaling field. -func (a *AppWorkerSpec) GetAutoscaling() *AppAutoscalingSpec { - if a == nil { - return nil - } - return a.Autoscaling -} - -// GetBuildCommand returns the BuildCommand field. -func (a *AppWorkerSpec) GetBuildCommand() string { - if a == nil { - return "" - } - return a.BuildCommand -} - -// GetDockerfilePath returns the DockerfilePath field. -func (a *AppWorkerSpec) GetDockerfilePath() string { - if a == nil { - return "" - } - return a.DockerfilePath -} - -// GetEnvironmentSlug returns the EnvironmentSlug field. -func (a *AppWorkerSpec) GetEnvironmentSlug() string { - if a == nil { - return "" - } - return a.EnvironmentSlug -} - -// GetEnvs returns the Envs field. -func (a *AppWorkerSpec) GetEnvs() []*AppVariableDefinition { - if a == nil { - return nil - } - return a.Envs -} - -// GetGit returns the Git field. -func (a *AppWorkerSpec) GetGit() *GitSourceSpec { - if a == nil { - return nil - } - return a.Git -} - -// GetGitHub returns the GitHub field. -func (a *AppWorkerSpec) GetGitHub() *GitHubSourceSpec { - if a == nil { - return nil - } - return a.GitHub -} - -// GetGitLab returns the GitLab field. 
-func (a *AppWorkerSpec) GetGitLab() *GitLabSourceSpec { - if a == nil { - return nil - } - return a.GitLab -} - -// GetImage returns the Image field. -func (a *AppWorkerSpec) GetImage() *ImageSourceSpec { - if a == nil { - return nil - } - return a.Image -} - -// GetInstanceCount returns the InstanceCount field. -func (a *AppWorkerSpec) GetInstanceCount() int64 { - if a == nil { - return 0 - } - return a.InstanceCount -} - -// GetInstanceSizeSlug returns the InstanceSizeSlug field. -func (a *AppWorkerSpec) GetInstanceSizeSlug() string { - if a == nil { - return "" - } - return a.InstanceSizeSlug -} - -// GetLogDestinations returns the LogDestinations field. -func (a *AppWorkerSpec) GetLogDestinations() []*AppLogDestinationSpec { - if a == nil { - return nil - } - return a.LogDestinations -} - -// GetName returns the Name field. -func (a *AppWorkerSpec) GetName() string { - if a == nil { - return "" - } - return a.Name -} - -// GetRunCommand returns the RunCommand field. -func (a *AppWorkerSpec) GetRunCommand() string { - if a == nil { - return "" - } - return a.RunCommand -} - -// GetSourceDir returns the SourceDir field. -func (a *AppWorkerSpec) GetSourceDir() string { - if a == nil { - return "" - } - return a.SourceDir -} - -// GetTermination returns the Termination field. -func (a *AppWorkerSpec) GetTermination() *AppWorkerSpecTermination { - if a == nil { - return nil - } - return a.Termination -} - -// GetGracePeriodSeconds returns the GracePeriodSeconds field. -func (a *AppWorkerSpecTermination) GetGracePeriodSeconds() int32 { - if a == nil { - return 0 - } - return a.GracePeriodSeconds -} - -// GetDescription returns the Description field. -func (b *Buildpack) GetDescription() []string { - if b == nil { - return nil - } - return b.Description -} - -// GetDocsLink returns the DocsLink field. -func (b *Buildpack) GetDocsLink() string { - if b == nil { - return "" - } - return b.DocsLink -} - -// GetID returns the ID field. 
-func (b *Buildpack) GetID() string { - if b == nil { - return "" - } - return b.ID -} - -// GetLatest returns the Latest field. -func (b *Buildpack) GetLatest() bool { - if b == nil { - return false - } - return b.Latest -} - -// GetMajorVersion returns the MajorVersion field. -func (b *Buildpack) GetMajorVersion() int32 { - if b == nil { - return 0 - } - return b.MajorVersion -} - -// GetName returns the Name field. -func (b *Buildpack) GetName() string { - if b == nil { - return "" - } - return b.Name -} - -// GetVersion returns the Version field. -func (b *Buildpack) GetVersion() string { - if b == nil { - return "" - } - return b.Version -} - -// GetCause returns the Cause field. -func (d *Deployment) GetCause() string { - if d == nil { - return "" - } - return d.Cause -} - -// GetCauseDetails returns the CauseDetails field. -func (d *Deployment) GetCauseDetails() *DeploymentCauseDetails { - if d == nil { - return nil - } - return d.CauseDetails -} - -// GetClonedFrom returns the ClonedFrom field. -func (d *Deployment) GetClonedFrom() string { - if d == nil { - return "" - } - return d.ClonedFrom -} - -// GetCreatedAt returns the CreatedAt field. -func (d *Deployment) GetCreatedAt() time.Time { - if d == nil { - return time.Time{} - } - return d.CreatedAt -} - -// GetFunctions returns the Functions field. -func (d *Deployment) GetFunctions() []*DeploymentFunctions { - if d == nil { - return nil - } - return d.Functions -} - -// GetID returns the ID field. -func (d *Deployment) GetID() string { - if d == nil { - return "" - } - return d.ID -} - -// GetJobs returns the Jobs field. -func (d *Deployment) GetJobs() []*DeploymentJob { - if d == nil { - return nil - } - return d.Jobs -} - -// GetLoadBalancerID returns the LoadBalancerID field. -func (d *Deployment) GetLoadBalancerID() string { - if d == nil { - return "" - } - return d.LoadBalancerID -} - -// GetPhase returns the Phase field. 
-func (d *Deployment) GetPhase() DeploymentPhase { - if d == nil { - return "" - } - return d.Phase -} - -// GetPhaseLastUpdatedAt returns the PhaseLastUpdatedAt field. -func (d *Deployment) GetPhaseLastUpdatedAt() time.Time { - if d == nil { - return time.Time{} - } - return d.PhaseLastUpdatedAt -} - -// GetPreviousDeploymentID returns the PreviousDeploymentID field. -func (d *Deployment) GetPreviousDeploymentID() string { - if d == nil { - return "" - } - return d.PreviousDeploymentID -} - -// GetProgress returns the Progress field. -func (d *Deployment) GetProgress() *DeploymentProgress { - if d == nil { - return nil - } - return d.Progress -} - -// GetServices returns the Services field. -func (d *Deployment) GetServices() []*DeploymentService { - if d == nil { - return nil - } - return d.Services -} - -// GetSpec returns the Spec field. -func (d *Deployment) GetSpec() *AppSpec { - if d == nil { - return nil - } - return d.Spec -} - -// GetStaticSites returns the StaticSites field. -func (d *Deployment) GetStaticSites() []*DeploymentStaticSite { - if d == nil { - return nil - } - return d.StaticSites -} - -// GetTierSlug returns the TierSlug field. -func (d *Deployment) GetTierSlug() string { - if d == nil { - return "" - } - return d.TierSlug -} - -// GetTiming returns the Timing field. -func (d *Deployment) GetTiming() *DeploymentTiming { - if d == nil { - return nil - } - return d.Timing -} - -// GetUpdatedAt returns the UpdatedAt field. -func (d *Deployment) GetUpdatedAt() time.Time { - if d == nil { - return time.Time{} - } - return d.UpdatedAt -} - -// GetWorkers returns the Workers field. -func (d *Deployment) GetWorkers() []*DeploymentWorker { - if d == nil { - return nil - } - return d.Workers -} - -// GetAutoscaler returns the Autoscaler field. 
-func (d *DeploymentCauseDetails) GetAutoscaler() *DeploymentCauseDetailsAutoscalerAction { - if d == nil { - return nil - } - return d.Autoscaler -} - -// GetDigitalOceanUserAction returns the DigitalOceanUserAction field. -func (d *DeploymentCauseDetails) GetDigitalOceanUserAction() *DeploymentCauseDetailsDigitalOceanUserAction { - if d == nil { - return nil - } - return d.DigitalOceanUserAction -} - -// GetDOCRPush returns the DOCRPush field. -func (d *DeploymentCauseDetails) GetDOCRPush() *DeploymentCauseDetailsDOCRPush { - if d == nil { - return nil - } - return d.DOCRPush -} - -// GetGitPush returns the GitPush field. -func (d *DeploymentCauseDetails) GetGitPush() *DeploymentCauseDetailsGitPush { - if d == nil { - return nil - } - return d.GitPush -} - -// GetInternal returns the Internal field. -func (d *DeploymentCauseDetails) GetInternal() bool { - if d == nil { - return false - } - return d.Internal -} - -// GetType returns the Type field. -func (d *DeploymentCauseDetails) GetType() DeploymentCauseDetailsType { - if d == nil { - return "" - } - return d.Type -} - -// GetAutoscaled returns the Autoscaled field. -func (d *DeploymentCauseDetailsAutoscalerAction) GetAutoscaled() bool { - if d == nil { - return false - } - return d.Autoscaled -} - -// GetEmail returns the Email field. -func (d *DeploymentCauseDetailsDigitalOceanUser) GetEmail() string { - if d == nil { - return "" - } - return d.Email -} - -// GetFullName returns the FullName field. -func (d *DeploymentCauseDetailsDigitalOceanUser) GetFullName() string { - if d == nil { - return "" - } - return d.FullName -} - -// GetUUID returns the UUID field. -func (d *DeploymentCauseDetailsDigitalOceanUser) GetUUID() string { - if d == nil { - return "" - } - return d.UUID -} - -// GetName returns the Name field. 
-func (d *DeploymentCauseDetailsDigitalOceanUserAction) GetName() DeploymentCauseDetailsDigitalOceanUserActionName { - if d == nil { - return "" - } - return d.Name -} - -// GetUser returns the User field. -func (d *DeploymentCauseDetailsDigitalOceanUserAction) GetUser() *DeploymentCauseDetailsDigitalOceanUser { - if d == nil { - return nil - } - return d.User -} - -// GetImageDigest returns the ImageDigest field. -func (d *DeploymentCauseDetailsDOCRPush) GetImageDigest() string { - if d == nil { - return "" - } - return d.ImageDigest -} - -// GetRegistry returns the Registry field. -func (d *DeploymentCauseDetailsDOCRPush) GetRegistry() string { - if d == nil { - return "" - } - return d.Registry -} - -// GetRepository returns the Repository field. -func (d *DeploymentCauseDetailsDOCRPush) GetRepository() string { - if d == nil { - return "" - } - return d.Repository -} - -// GetTag returns the Tag field. -func (d *DeploymentCauseDetailsDOCRPush) GetTag() string { - if d == nil { - return "" - } - return d.Tag -} - -// GetCommitAuthor returns the CommitAuthor field. -func (d *DeploymentCauseDetailsGitPush) GetCommitAuthor() string { - if d == nil { - return "" - } - return d.CommitAuthor -} - -// GetCommitMessage returns the CommitMessage field. -func (d *DeploymentCauseDetailsGitPush) GetCommitMessage() string { - if d == nil { - return "" - } - return d.CommitMessage -} - -// GetCommitSHA returns the CommitSHA field. -func (d *DeploymentCauseDetailsGitPush) GetCommitSHA() string { - if d == nil { - return "" - } - return d.CommitSHA -} - -// GetGitHub returns the GitHub field. -func (d *DeploymentCauseDetailsGitPush) GetGitHub() *GitHubSourceSpec { - if d == nil { - return nil - } - return d.GitHub -} - -// GetGitLab returns the GitLab field. -func (d *DeploymentCauseDetailsGitPush) GetGitLab() *GitLabSourceSpec { - if d == nil { - return nil - } - return d.GitLab -} - -// GetUsername returns the Username field. 
-func (d *DeploymentCauseDetailsGitPush) GetUsername() string { - if d == nil { - return "" - } - return d.Username -} - -// GetName returns the Name field. -func (d *DeploymentFunctions) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetNamespace returns the Namespace field. -func (d *DeploymentFunctions) GetNamespace() string { - if d == nil { - return "" - } - return d.Namespace -} - -// GetSourceCommitHash returns the SourceCommitHash field. -func (d *DeploymentFunctions) GetSourceCommitHash() string { - if d == nil { - return "" - } - return d.SourceCommitHash -} - -// GetBuildpacks returns the Buildpacks field. -func (d *DeploymentJob) GetBuildpacks() []*Buildpack { - if d == nil { - return nil - } - return d.Buildpacks -} - -// GetName returns the Name field. -func (d *DeploymentJob) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetSourceCommitHash returns the SourceCommitHash field. -func (d *DeploymentJob) GetSourceCommitHash() string { - if d == nil { - return "" - } - return d.SourceCommitHash -} - -// GetErrorSteps returns the ErrorSteps field. -func (d *DeploymentProgress) GetErrorSteps() int32 { - if d == nil { - return 0 - } - return d.ErrorSteps -} - -// GetPendingSteps returns the PendingSteps field. -func (d *DeploymentProgress) GetPendingSteps() int32 { - if d == nil { - return 0 - } - return d.PendingSteps -} - -// GetRunningSteps returns the RunningSteps field. -func (d *DeploymentProgress) GetRunningSteps() int32 { - if d == nil { - return 0 - } - return d.RunningSteps -} - -// GetSteps returns the Steps field. -func (d *DeploymentProgress) GetSteps() []*DeploymentProgressStep { - if d == nil { - return nil - } - return d.Steps -} - -// GetSuccessSteps returns the SuccessSteps field. -func (d *DeploymentProgress) GetSuccessSteps() int32 { - if d == nil { - return 0 - } - return d.SuccessSteps -} - -// GetSummarySteps returns the SummarySteps field. 
-func (d *DeploymentProgress) GetSummarySteps() []*DeploymentProgressStep { - if d == nil { - return nil - } - return d.SummarySteps -} - -// GetTotalSteps returns the TotalSteps field. -func (d *DeploymentProgress) GetTotalSteps() int32 { - if d == nil { - return 0 - } - return d.TotalSteps -} - -// GetComponentName returns the ComponentName field. -func (d *DeploymentProgressStep) GetComponentName() string { - if d == nil { - return "" - } - return d.ComponentName -} - -// GetEndedAt returns the EndedAt field. -func (d *DeploymentProgressStep) GetEndedAt() time.Time { - if d == nil { - return time.Time{} - } - return d.EndedAt -} - -// GetMessageBase returns the MessageBase field. -func (d *DeploymentProgressStep) GetMessageBase() string { - if d == nil { - return "" - } - return d.MessageBase -} - -// GetName returns the Name field. -func (d *DeploymentProgressStep) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetReason returns the Reason field. -func (d *DeploymentProgressStep) GetReason() *DeploymentProgressStepReason { - if d == nil { - return nil - } - return d.Reason -} - -// GetStartedAt returns the StartedAt field. -func (d *DeploymentProgressStep) GetStartedAt() time.Time { - if d == nil { - return time.Time{} - } - return d.StartedAt -} - -// GetStatus returns the Status field. -func (d *DeploymentProgressStep) GetStatus() DeploymentProgressStepStatus { - if d == nil { - return "" - } - return d.Status -} - -// GetSteps returns the Steps field. -func (d *DeploymentProgressStep) GetSteps() []*DeploymentProgressStep { - if d == nil { - return nil - } - return d.Steps -} - -// GetCode returns the Code field. -func (d *DeploymentProgressStepReason) GetCode() string { - if d == nil { - return "" - } - return d.Code -} - -// GetMessage returns the Message field. -func (d *DeploymentProgressStepReason) GetMessage() string { - if d == nil { - return "" - } - return d.Message -} - -// GetBuildpacks returns the Buildpacks field. 
-func (d *DeploymentService) GetBuildpacks() []*Buildpack { - if d == nil { - return nil - } - return d.Buildpacks -} - -// GetName returns the Name field. -func (d *DeploymentService) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetSourceCommitHash returns the SourceCommitHash field. -func (d *DeploymentService) GetSourceCommitHash() string { - if d == nil { - return "" - } - return d.SourceCommitHash -} - -// GetBuildpacks returns the Buildpacks field. -func (d *DeploymentStaticSite) GetBuildpacks() []*Buildpack { - if d == nil { - return nil - } - return d.Buildpacks -} - -// GetName returns the Name field. -func (d *DeploymentStaticSite) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetSourceCommitHash returns the SourceCommitHash field. -func (d *DeploymentStaticSite) GetSourceCommitHash() string { - if d == nil { - return "" - } - return d.SourceCommitHash -} - -// GetBuildBillable returns the BuildBillable field. -func (d *DeploymentTiming) GetBuildBillable() string { - if d == nil { - return "" - } - return d.BuildBillable -} - -// GetBuildTotal returns the BuildTotal field. -func (d *DeploymentTiming) GetBuildTotal() string { - if d == nil { - return "" - } - return d.BuildTotal -} - -// GetComponents returns the Components field. -func (d *DeploymentTiming) GetComponents() []*DeploymentTimingComponent { - if d == nil { - return nil - } - return d.Components -} - -// GetDatabaseProvision returns the DatabaseProvision field. -func (d *DeploymentTiming) GetDatabaseProvision() string { - if d == nil { - return "" - } - return d.DatabaseProvision -} - -// GetDeploying returns the Deploying field. -func (d *DeploymentTiming) GetDeploying() string { - if d == nil { - return "" - } - return d.Deploying -} - -// GetPending returns the Pending field. 
-func (d *DeploymentTiming) GetPending() string { - if d == nil { - return "" - } - return d.Pending -} - -// GetBuildBillable returns the BuildBillable field. -func (d *DeploymentTimingComponent) GetBuildBillable() string { - if d == nil { - return "" - } - return d.BuildBillable -} - -// GetName returns the Name field. -func (d *DeploymentTimingComponent) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetBuildpacks returns the Buildpacks field. -func (d *DeploymentWorker) GetBuildpacks() []*Buildpack { - if d == nil { - return nil - } - return d.Buildpacks -} - -// GetName returns the Name field. -func (d *DeploymentWorker) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetSourceCommitHash returns the SourceCommitHash field. -func (d *DeploymentWorker) GetSourceCommitHash() string { - if d == nil { - return "" - } - return d.SourceCommitHash -} - -// GetSpec returns the Spec field. -func (d *DeployTemplate) GetSpec() *AppSpec { - if d == nil { - return nil - } - return d.Spec -} - -// GetCommitSHA returns the CommitSHA field. -func (d *DetectRequest) GetCommitSHA() string { - if d == nil { - return "" - } - return d.CommitSHA -} - -// GetGit returns the Git field. -func (d *DetectRequest) GetGit() *GitSourceSpec { - if d == nil { - return nil - } - return d.Git -} - -// GetGitHub returns the GitHub field. -func (d *DetectRequest) GetGitHub() *GitHubSourceSpec { - if d == nil { - return nil - } - return d.GitHub -} - -// GetGitLab returns the GitLab field. -func (d *DetectRequest) GetGitLab() *GitLabSourceSpec { - if d == nil { - return nil - } - return d.GitLab -} - -// GetSourceDir returns the SourceDir field. -func (d *DetectRequest) GetSourceDir() string { - if d == nil { - return "" - } - return d.SourceDir -} - -// GetComponents returns the Components field. 
-func (d *DetectResponse) GetComponents() []*DetectResponseComponent { - if d == nil { - return nil - } - return d.Components -} - -// GetTemplate returns the Template field. -func (d *DetectResponse) GetTemplate() *DeployTemplate { - if d == nil { - return nil - } - return d.Template -} - -// GetTemplateError returns the TemplateError field. -func (d *DetectResponse) GetTemplateError() string { - if d == nil { - return "" - } - return d.TemplateError -} - -// GetTemplateFound returns the TemplateFound field. -func (d *DetectResponse) GetTemplateFound() bool { - if d == nil { - return false - } - return d.TemplateFound -} - -// GetTemplateValid returns the TemplateValid field. -func (d *DetectResponse) GetTemplateValid() bool { - if d == nil { - return false - } - return d.TemplateValid -} - -// GetBuildCommand returns the BuildCommand field. -func (d *DetectResponseComponent) GetBuildCommand() string { - if d == nil { - return "" - } - return d.BuildCommand -} - -// GetBuildpacks returns the Buildpacks field. -func (d *DetectResponseComponent) GetBuildpacks() []*Buildpack { - if d == nil { - return nil - } - return d.Buildpacks -} - -// GetDockerfiles returns the Dockerfiles field. -func (d *DetectResponseComponent) GetDockerfiles() []string { - if d == nil { - return nil - } - return d.Dockerfiles -} - -// GetEnvironmentSlug returns the EnvironmentSlug field. -func (d *DetectResponseComponent) GetEnvironmentSlug() string { - if d == nil { - return "" - } - return d.EnvironmentSlug -} - -// GetEnvVars returns the EnvVars field. -func (d *DetectResponseComponent) GetEnvVars() []*AppVariableDefinition { - if d == nil { - return nil - } - return d.EnvVars -} - -// GetHTTPPorts returns the HTTPPorts field. -func (d *DetectResponseComponent) GetHTTPPorts() []int64 { - if d == nil { - return nil - } - return d.HTTPPorts -} - -// GetRunCommand returns the RunCommand field. 
-func (d *DetectResponseComponent) GetRunCommand() string { - if d == nil { - return "" - } - return d.RunCommand -} - -// GetServerlessPackages returns the ServerlessPackages field. -func (d *DetectResponseComponent) GetServerlessPackages() []*DetectResponseServerlessPackage { - if d == nil { - return nil - } - return d.ServerlessPackages -} - -// GetSourceDir returns the SourceDir field. -func (d *DetectResponseComponent) GetSourceDir() string { - if d == nil { - return "" - } - return d.SourceDir -} - -// GetStrategy returns the Strategy field. -func (d *DetectResponseComponent) GetStrategy() DetectResponseType { - if d == nil { - return "" - } - return d.Strategy -} - -// GetTypes returns the Types field. -func (d *DetectResponseComponent) GetTypes() []string { - if d == nil { - return nil - } - return d.Types -} - -// GetLimits returns the Limits field. -func (d *DetectResponseServerlessFunction) GetLimits() *DetectResponseServerlessFunctionLimits { - if d == nil { - return nil - } - return d.Limits -} - -// GetName returns the Name field. -func (d *DetectResponseServerlessFunction) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetPackage returns the Package field. -func (d *DetectResponseServerlessFunction) GetPackage() string { - if d == nil { - return "" - } - return d.Package -} - -// GetRuntime returns the Runtime field. -func (d *DetectResponseServerlessFunction) GetRuntime() string { - if d == nil { - return "" - } - return d.Runtime -} - -// GetLogs returns the Logs field. -func (d *DetectResponseServerlessFunctionLimits) GetLogs() string { - if d == nil { - return "" - } - return d.Logs -} - -// GetMemory returns the Memory field. -func (d *DetectResponseServerlessFunctionLimits) GetMemory() string { - if d == nil { - return "" - } - return d.Memory -} - -// GetTimeout returns the Timeout field. 
-func (d *DetectResponseServerlessFunctionLimits) GetTimeout() string { - if d == nil { - return "" - } - return d.Timeout -} - -// GetFunctions returns the Functions field. -func (d *DetectResponseServerlessPackage) GetFunctions() []*DetectResponseServerlessFunction { - if d == nil { - return nil - } - return d.Functions -} - -// GetName returns the Name field. -func (d *DetectResponseServerlessPackage) GetName() string { - if d == nil { - return "" - } - return d.Name -} - -// GetConnectionDetails returns the ConnectionDetails field. -func (g *GetAppDatabaseConnectionDetailsResponse) GetConnectionDetails() []*GetDatabaseConnectionDetailsResponse { - if g == nil { - return nil - } - return g.ConnectionDetails -} - -// GetComponentName returns the ComponentName field. -func (g *GetDatabaseConnectionDetailsResponse) GetComponentName() string { - if g == nil { - return "" - } - return g.ComponentName -} - -// GetDatabaseName returns the DatabaseName field. -func (g *GetDatabaseConnectionDetailsResponse) GetDatabaseName() string { - if g == nil { - return "" - } - return g.DatabaseName -} - -// GetDatabaseURL returns the DatabaseURL field. -func (g *GetDatabaseConnectionDetailsResponse) GetDatabaseURL() string { - if g == nil { - return "" - } - return g.DatabaseURL -} - -// GetHost returns the Host field. -func (g *GetDatabaseConnectionDetailsResponse) GetHost() string { - if g == nil { - return "" - } - return g.Host -} - -// GetPassword returns the Password field. -func (g *GetDatabaseConnectionDetailsResponse) GetPassword() string { - if g == nil { - return "" - } - return g.Password -} - -// GetPools returns the Pools field. -func (g *GetDatabaseConnectionDetailsResponse) GetPools() []*GetDatabaseConnectionDetailsResponsePool { - if g == nil { - return nil - } - return g.Pools -} - -// GetPort returns the Port field. 
-func (g *GetDatabaseConnectionDetailsResponse) GetPort() int64 { - if g == nil { - return 0 - } - return g.Port -} - -// GetSslMode returns the SslMode field. -func (g *GetDatabaseConnectionDetailsResponse) GetSslMode() string { - if g == nil { - return "" - } - return g.SslMode -} - -// GetUsername returns the Username field. -func (g *GetDatabaseConnectionDetailsResponse) GetUsername() string { - if g == nil { - return "" - } - return g.Username -} - -// GetDatabaseName returns the DatabaseName field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetDatabaseName() string { - if g == nil { - return "" - } - return g.DatabaseName -} - -// GetDatabaseURL returns the DatabaseURL field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetDatabaseURL() string { - if g == nil { - return "" - } - return g.DatabaseURL -} - -// GetHost returns the Host field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetHost() string { - if g == nil { - return "" - } - return g.Host -} - -// GetPassword returns the Password field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetPassword() string { - if g == nil { - return "" - } - return g.Password -} - -// GetPoolName returns the PoolName field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetPoolName() string { - if g == nil { - return "" - } - return g.PoolName -} - -// GetPort returns the Port field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetPort() int64 { - if g == nil { - return 0 - } - return g.Port -} - -// GetSslMode returns the SslMode field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetSslMode() string { - if g == nil { - return "" - } - return g.SslMode -} - -// GetUsername returns the Username field. -func (g *GetDatabaseConnectionDetailsResponsePool) GetUsername() string { - if g == nil { - return "" - } - return g.Username -} - -// GetIsEnabled returns the IsEnabled field. 
-func (g *GetDatabaseTrustedSourceResponse) GetIsEnabled() bool { - if g == nil { - return false - } - return g.IsEnabled -} - -// GetBranch returns the Branch field. -func (g *GitHubSourceSpec) GetBranch() string { - if g == nil { - return "" - } - return g.Branch -} - -// GetDeployOnPush returns the DeployOnPush field. -func (g *GitHubSourceSpec) GetDeployOnPush() bool { - if g == nil { - return false - } - return g.DeployOnPush -} - -// GetRepo returns the Repo field. -func (g *GitHubSourceSpec) GetRepo() string { - if g == nil { - return "" - } - return g.Repo -} - -// GetBranch returns the Branch field. -func (g *GitLabSourceSpec) GetBranch() string { - if g == nil { - return "" - } - return g.Branch -} - -// GetDeployOnPush returns the DeployOnPush field. -func (g *GitLabSourceSpec) GetDeployOnPush() bool { - if g == nil { - return false - } - return g.DeployOnPush -} - -// GetRepo returns the Repo field. -func (g *GitLabSourceSpec) GetRepo() string { - if g == nil { - return "" - } - return g.Repo -} - -// GetBranch returns the Branch field. -func (g *GitSourceSpec) GetBranch() string { - if g == nil { - return "" - } - return g.Branch -} - -// GetRepoCloneURL returns the RepoCloneURL field. -func (g *GitSourceSpec) GetRepoCloneURL() string { - if g == nil { - return "" - } - return g.RepoCloneURL -} - -// GetDeployOnPush returns the DeployOnPush field. -func (i *ImageSourceSpec) GetDeployOnPush() *ImageSourceSpecDeployOnPush { - if i == nil { - return nil - } - return i.DeployOnPush -} - -// GetDigest returns the Digest field. -func (i *ImageSourceSpec) GetDigest() string { - if i == nil { - return "" - } - return i.Digest -} - -// GetRegistry returns the Registry field. -func (i *ImageSourceSpec) GetRegistry() string { - if i == nil { - return "" - } - return i.Registry -} - -// GetRegistryCredentials returns the RegistryCredentials field. 
-func (i *ImageSourceSpec) GetRegistryCredentials() string { - if i == nil { - return "" - } - return i.RegistryCredentials -} - -// GetRegistryType returns the RegistryType field. -func (i *ImageSourceSpec) GetRegistryType() ImageSourceSpecRegistryType { - if i == nil { - return "" - } - return i.RegistryType -} - -// GetRepository returns the Repository field. -func (i *ImageSourceSpec) GetRepository() string { - if i == nil { - return "" - } - return i.Repository -} - -// GetTag returns the Tag field. -func (i *ImageSourceSpec) GetTag() string { - if i == nil { - return "" - } - return i.Tag -} - -// GetEnabled returns the Enabled field. -func (i *ImageSourceSpecDeployOnPush) GetEnabled() bool { - if i == nil { - return false - } - return i.Enabled -} - -// GetBuildpacks returns the Buildpacks field. -func (l *ListBuildpacksResponse) GetBuildpacks() []*Buildpack { - if l == nil { - return nil - } - return l.Buildpacks -} - -// GetPassword returns the Password field. -func (o *OpenSearchBasicAuth) GetPassword() string { - if o == nil { - return "" - } - return o.Password -} - -// GetUser returns the User field. -func (o *OpenSearchBasicAuth) GetUser() string { - if o == nil { - return "" - } - return o.User -} - -// GetAppID returns the AppID field. -func (r *ResetDatabasePasswordRequest) GetAppID() string { - if r == nil { - return "" - } - return r.AppID -} - -// GetComponentName returns the ComponentName field. -func (r *ResetDatabasePasswordRequest) GetComponentName() string { - if r == nil { - return "" - } - return r.ComponentName -} - -// GetDeployment returns the Deployment field. -func (r *ResetDatabasePasswordResponse) GetDeployment() *Deployment { - if r == nil { - return nil - } - return r.Deployment -} - -// GetAppID returns the AppID field. -func (t *ToggleDatabaseTrustedSourceRequest) GetAppID() string { - if t == nil { - return "" - } - return t.AppID -} - -// GetComponentName returns the ComponentName field. 
-func (t *ToggleDatabaseTrustedSourceRequest) GetComponentName() string { - if t == nil { - return "" - } - return t.ComponentName -} - -// GetEnable returns the Enable field. -func (t *ToggleDatabaseTrustedSourceRequest) GetEnable() bool { - if t == nil { - return false - } - return t.Enable -} - -// GetIsEnabled returns the IsEnabled field. -func (t *ToggleDatabaseTrustedSourceResponse) GetIsEnabled() bool { - if t == nil { - return false - } - return t.IsEnabled -} - -// GetAffectedComponents returns the AffectedComponents field. -func (u *UpgradeBuildpackResponse) GetAffectedComponents() []string { - if u == nil { - return nil - } - return u.AffectedComponents -} - -// GetDeployment returns the Deployment field. -func (u *UpgradeBuildpackResponse) GetDeployment() *Deployment { - if u == nil { - return nil - } - return u.Deployment -} diff --git a/vendor/github.com/digitalocean/godo/balance.go b/vendor/github.com/digitalocean/godo/balance.go deleted file mode 100644 index bfd0b04..0000000 --- a/vendor/github.com/digitalocean/godo/balance.go +++ /dev/null @@ -1,52 +0,0 @@ -package godo - -import ( - "context" - "net/http" - "time" -) - -// BalanceService is an interface for interfacing with the Balance -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#operation/balance_get -type BalanceService interface { - Get(context.Context) (*Balance, *Response, error) -} - -// BalanceServiceOp handles communication with the Balance related methods of -// the DigitalOcean API. 
-type BalanceServiceOp struct { - client *Client -} - -var _ BalanceService = &BalanceServiceOp{} - -// Balance represents a DigitalOcean Balance -type Balance struct { - MonthToDateBalance string `json:"month_to_date_balance"` - AccountBalance string `json:"account_balance"` - MonthToDateUsage string `json:"month_to_date_usage"` - GeneratedAt time.Time `json:"generated_at"` -} - -func (r Balance) String() string { - return Stringify(r) -} - -// Get DigitalOcean balance info -func (s *BalanceServiceOp) Get(ctx context.Context) (*Balance, *Response, error) { - path := "v2/customers/my/balance" - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(Balance) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/billing_history.go b/vendor/github.com/digitalocean/godo/billing_history.go deleted file mode 100644 index ae87c10..0000000 --- a/vendor/github.com/digitalocean/godo/billing_history.go +++ /dev/null @@ -1,72 +0,0 @@ -package godo - -import ( - "context" - "net/http" - "time" -) - -const billingHistoryBasePath = "v2/customers/my/billing_history" - -// BillingHistoryService is an interface for interfacing with the BillingHistory -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#operation/billingHistory_list -type BillingHistoryService interface { - List(context.Context, *ListOptions) (*BillingHistory, *Response, error) -} - -// BillingHistoryServiceOp handles communication with the BillingHistory related methods of -// the DigitalOcean API. 
-type BillingHistoryServiceOp struct { - client *Client -} - -var _ BillingHistoryService = &BillingHistoryServiceOp{} - -// BillingHistory represents a DigitalOcean Billing History -type BillingHistory struct { - BillingHistory []BillingHistoryEntry `json:"billing_history"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// BillingHistoryEntry represents an entry in a customer's Billing History -type BillingHistoryEntry struct { - Description string `json:"description"` - Amount string `json:"amount"` - InvoiceID *string `json:"invoice_id"` - InvoiceUUID *string `json:"invoice_uuid"` - Date time.Time `json:"date"` - Type string `json:"type"` -} - -func (b BillingHistory) String() string { - return Stringify(b) -} - -// List the Billing History for a customer -func (s *BillingHistoryServiceOp) List(ctx context.Context, opt *ListOptions) (*BillingHistory, *Response, error) { - path, err := addOptions(billingHistoryBasePath, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(BillingHistory) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/cdn.go b/vendor/github.com/digitalocean/godo/cdn.go deleted file mode 100644 index 4c97d11..0000000 --- a/vendor/github.com/digitalocean/godo/cdn.go +++ /dev/null @@ -1,218 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "time" -) - -const cdnBasePath = "v2/cdn/endpoints" - -// CDNService is an interface for managing Spaces CDN with the DigitalOcean API. 
-type CDNService interface { - List(context.Context, *ListOptions) ([]CDN, *Response, error) - Get(context.Context, string) (*CDN, *Response, error) - Create(context.Context, *CDNCreateRequest) (*CDN, *Response, error) - UpdateTTL(context.Context, string, *CDNUpdateTTLRequest) (*CDN, *Response, error) - UpdateCustomDomain(context.Context, string, *CDNUpdateCustomDomainRequest) (*CDN, *Response, error) - FlushCache(context.Context, string, *CDNFlushCacheRequest) (*Response, error) - Delete(context.Context, string) (*Response, error) -} - -// CDNServiceOp handles communication with the CDN related methods of the -// DigitalOcean API. -type CDNServiceOp struct { - client *Client -} - -var _ CDNService = &CDNServiceOp{} - -// CDN represents a DigitalOcean CDN -type CDN struct { - ID string `json:"id"` - Origin string `json:"origin"` - Endpoint string `json:"endpoint"` - CreatedAt time.Time `json:"created_at"` - TTL uint32 `json:"ttl"` - CertificateID string `json:"certificate_id,omitempty"` - CustomDomain string `json:"custom_domain,omitempty"` -} - -// CDNRoot represents a response from the DigitalOcean API -type cdnRoot struct { - Endpoint *CDN `json:"endpoint"` -} - -type cdnsRoot struct { - Endpoints []CDN `json:"endpoints"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// CDNCreateRequest represents a request to create a CDN. -type CDNCreateRequest struct { - Origin string `json:"origin"` - TTL uint32 `json:"ttl"` - CustomDomain string `json:"custom_domain,omitempty"` - CertificateID string `json:"certificate_id,omitempty"` -} - -// CDNUpdateTTLRequest represents a request to update the ttl of a CDN. -type CDNUpdateTTLRequest struct { - TTL uint32 `json:"ttl"` -} - -// CDNUpdateCustomDomainRequest represents a request to update the custom domain of a CDN. 
-type CDNUpdateCustomDomainRequest struct { - CustomDomain string `json:"custom_domain"` - CertificateID string `json:"certificate_id"` -} - -// CDNFlushCacheRequest represents a request to flush cache of a CDN. -type CDNFlushCacheRequest struct { - Files []string `json:"files"` -} - -// List all CDN endpoints -func (c CDNServiceOp) List(ctx context.Context, opt *ListOptions) ([]CDN, *Response, error) { - path, err := addOptions(cdnBasePath, opt) - if err != nil { - return nil, nil, err - } - - req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(cdnsRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Endpoints, resp, err -} - -// Get individual CDN. It requires a non-empty cdn id. -func (c CDNServiceOp) Get(ctx context.Context, id string) (*CDN, *Response, error) { - if len(id) == 0 { - return nil, nil, NewArgError("id", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s", cdnBasePath, id) - - req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(cdnRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Endpoint, resp, err -} - -// Create a new CDN -func (c CDNServiceOp) Create(ctx context.Context, createRequest *CDNCreateRequest) (*CDN, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - req, err := c.client.NewRequest(ctx, http.MethodPost, cdnBasePath, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(cdnRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Endpoint, resp, err -} - -// UpdateTTL updates the ttl of an individual CDN -func (c 
CDNServiceOp) UpdateTTL(ctx context.Context, id string, updateRequest *CDNUpdateTTLRequest) (*CDN, *Response, error) { - return c.update(ctx, id, updateRequest) -} - -// UpdateCustomDomain sets or removes the custom domain of an individual CDN -func (c CDNServiceOp) UpdateCustomDomain(ctx context.Context, id string, updateRequest *CDNUpdateCustomDomainRequest) (*CDN, *Response, error) { - return c.update(ctx, id, updateRequest) -} - -func (c CDNServiceOp) update(ctx context.Context, id string, updateRequest interface{}) (*CDN, *Response, error) { - if updateRequest == nil { - return nil, nil, NewArgError("updateRequest", "cannot be nil") - } - - if len(id) == 0 { - return nil, nil, NewArgError("id", "cannot be an empty string") - } - path := fmt.Sprintf("%s/%s", cdnBasePath, id) - - req, err := c.client.NewRequest(ctx, http.MethodPut, path, updateRequest) - if err != nil { - return nil, nil, err - } - - root := new(cdnRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Endpoint, resp, err -} - -// FlushCache flushes the cache of an individual CDN. 
Requires a non-empty slice of file paths and/or wildcards -func (c CDNServiceOp) FlushCache(ctx context.Context, id string, flushCacheRequest *CDNFlushCacheRequest) (*Response, error) { - if flushCacheRequest == nil { - return nil, NewArgError("flushCacheRequest", "cannot be nil") - } - - if len(id) == 0 { - return nil, NewArgError("id", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s/cache", cdnBasePath, id) - - req, err := c.client.NewRequest(ctx, http.MethodDelete, path, flushCacheRequest) - if err != nil { - return nil, err - } - - resp, err := c.client.Do(ctx, req, nil) - - return resp, err -} - -// Delete an individual CDN -func (c CDNServiceOp) Delete(ctx context.Context, id string) (*Response, error) { - if len(id) == 0 { - return nil, NewArgError("id", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s", cdnBasePath, id) - - req, err := c.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := c.client.Do(ctx, req, nil) - - return resp, err -} diff --git a/vendor/github.com/digitalocean/godo/certificates.go b/vendor/github.com/digitalocean/godo/certificates.go deleted file mode 100644 index 7612acf..0000000 --- a/vendor/github.com/digitalocean/godo/certificates.go +++ /dev/null @@ -1,165 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "path" -) - -const certificatesBasePath = "/v2/certificates" - -// CertificatesService is an interface for managing certificates with the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Certificates -type CertificatesService interface { - Get(context.Context, string) (*Certificate, *Response, error) - List(context.Context, *ListOptions) ([]Certificate, *Response, error) - ListByName(context.Context, string, *ListOptions) ([]Certificate, *Response, error) - Create(context.Context, *CertificateRequest) (*Certificate, *Response, error) - Delete(context.Context, string) (*Response, error) -} - -// Certificate represents a DigitalOcean certificate configuration. -type Certificate struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - DNSNames []string `json:"dns_names,omitempty"` - NotAfter string `json:"not_after,omitempty"` - SHA1Fingerprint string `json:"sha1_fingerprint,omitempty"` - Created string `json:"created_at,omitempty"` - State string `json:"state,omitempty"` - Type string `json:"type,omitempty"` -} - -// CertificateRequest represents configuration for a new certificate. -type CertificateRequest struct { - Name string `json:"name,omitempty"` - DNSNames []string `json:"dns_names,omitempty"` - PrivateKey string `json:"private_key,omitempty"` - LeafCertificate string `json:"leaf_certificate,omitempty"` - CertificateChain string `json:"certificate_chain,omitempty"` - Type string `json:"type,omitempty"` -} - -type certificateRoot struct { - Certificate *Certificate `json:"certificate"` -} - -type certificatesRoot struct { - Certificates []Certificate `json:"certificates"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// CertificatesServiceOp handles communication with certificates methods of the DigitalOcean API. -type CertificatesServiceOp struct { - client *Client -} - -var _ CertificatesService = &CertificatesServiceOp{} - -// Get an existing certificate by its identifier. 
-func (c *CertificatesServiceOp) Get(ctx context.Context, cID string) (*Certificate, *Response, error) { - urlStr := path.Join(certificatesBasePath, cID) - - req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil) - if err != nil { - return nil, nil, err - } - - root := new(certificateRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Certificate, resp, nil -} - -// List all certificates. -func (c *CertificatesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Certificate, *Response, error) { - urlStr, err := addOptions(certificatesBasePath, opt) - if err != nil { - return nil, nil, err - } - - req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil) - if err != nil { - return nil, nil, err - } - - root := new(certificatesRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Certificates, resp, nil -} - -func (c *CertificatesServiceOp) ListByName(ctx context.Context, name string, opt *ListOptions) ([]Certificate, *Response, error) { - - if len(name) < 1 { - return nil, nil, NewArgError("name", "cannot be an empty string") - } - - path := fmt.Sprintf("%s?name=%s", certificatesBasePath, name) - urlStr, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil) - if err != nil { - return nil, nil, err - } - - root := new(certificatesRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Certificates, resp, err -} - -// Create a new certificate with provided configuration. 
-func (c *CertificatesServiceOp) Create(ctx context.Context, cr *CertificateRequest) (*Certificate, *Response, error) { - req, err := c.client.NewRequest(ctx, http.MethodPost, certificatesBasePath, cr) - if err != nil { - return nil, nil, err - } - - root := new(certificateRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Certificate, resp, nil -} - -// Delete a certificate by its identifier. -func (c *CertificatesServiceOp) Delete(ctx context.Context, cID string) (*Response, error) { - urlStr := path.Join(certificatesBasePath, cID) - - req, err := c.client.NewRequest(ctx, http.MethodDelete, urlStr, nil) - if err != nil { - return nil, err - } - - return c.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go deleted file mode 100644 index b915391..0000000 --- a/vendor/github.com/digitalocean/godo/databases.go +++ /dev/null @@ -1,1561 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "strings" - "time" -) - -const ( - databaseBasePath = "/v2/databases" - databaseSinglePath = databaseBasePath + "/%s" - databaseCAPath = databaseBasePath + "/%s/ca" - databaseConfigPath = databaseBasePath + "/%s/config" - databaseResizePath = databaseBasePath + "/%s/resize" - databaseMigratePath = databaseBasePath + "/%s/migrate" - databaseMaintenancePath = databaseBasePath + "/%s/maintenance" - databaseBackupsPath = databaseBasePath + "/%s/backups" - databaseUsersPath = databaseBasePath + "/%s/users" - databaseUserPath = databaseBasePath + "/%s/users/%s" - databaseResetUserAuthPath = databaseUserPath + "/reset_auth" - databaseDBPath = databaseBasePath + "/%s/dbs/%s" - databaseDBsPath = databaseBasePath + "/%s/dbs" - databasePoolPath = databaseBasePath + "/%s/pools/%s" - databasePoolsPath = databaseBasePath + "/%s/pools" - databaseReplicaPath = databaseBasePath + "/%s/replicas/%s" - databaseReplicasPath = databaseBasePath 
+ "/%s/replicas" - databaseEvictionPolicyPath = databaseBasePath + "/%s/eviction_policy" - databaseSQLModePath = databaseBasePath + "/%s/sql_mode" - databaseFirewallRulesPath = databaseBasePath + "/%s/firewall" - databaseOptionsPath = databaseBasePath + "/options" - databaseUpgradeMajorVersionPath = databaseBasePath + "/%s/upgrade" - databasePromoteReplicaToPrimaryPath = databaseReplicaPath + "/promote" - databaseTopicPath = databaseBasePath + "/%s/topics/%s" - databaseTopicsPath = databaseBasePath + "/%s/topics" - databaseMetricsCredentialsPath = databaseBasePath + "/metrics/credentials" - databaseEvents = databaseBasePath + "/%s/events" -) - -// SQL Mode constants allow for MySQL-specific SQL flavor configuration. -const ( - SQLModeAllowInvalidDates = "ALLOW_INVALID_DATES" - SQLModeANSIQuotes = "ANSI_QUOTES" - SQLModeHighNotPrecedence = "HIGH_NOT_PRECEDENCE" - SQLModeIgnoreSpace = "IGNORE_SPACE" - SQLModeNoAuthCreateUser = "NO_AUTO_CREATE_USER" - SQLModeNoAutoValueOnZero = "NO_AUTO_VALUE_ON_ZERO" - SQLModeNoBackslashEscapes = "NO_BACKSLASH_ESCAPES" - SQLModeNoDirInCreate = "NO_DIR_IN_CREATE" - SQLModeNoEngineSubstitution = "NO_ENGINE_SUBSTITUTION" - SQLModeNoFieldOptions = "NO_FIELD_OPTIONS" - SQLModeNoKeyOptions = "NO_KEY_OPTIONS" - SQLModeNoTableOptions = "NO_TABLE_OPTIONS" - SQLModeNoUnsignedSubtraction = "NO_UNSIGNED_SUBTRACTION" - SQLModeNoZeroDate = "NO_ZERO_DATE" - SQLModeNoZeroInDate = "NO_ZERO_IN_DATE" - SQLModeOnlyFullGroupBy = "ONLY_FULL_GROUP_BY" - SQLModePadCharToFullLength = "PAD_CHAR_TO_FULL_LENGTH" - SQLModePipesAsConcat = "PIPES_AS_CONCAT" - SQLModeRealAsFloat = "REAL_AS_FLOAT" - SQLModeStrictAllTables = "STRICT_ALL_TABLES" - SQLModeStrictTransTables = "STRICT_TRANS_TABLES" - SQLModeANSI = "ANSI" - SQLModeDB2 = "DB2" - SQLModeMaxDB = "MAXDB" - SQLModeMSSQL = "MSSQL" - SQLModeMYSQL323 = "MYSQL323" - SQLModeMYSQL40 = "MYSQL40" - SQLModeOracle = "ORACLE" - SQLModePostgreSQL = "POSTGRESQL" - SQLModeTraditional = "TRADITIONAL" -) - -// SQL Auth 
constants allow for MySQL-specific user auth plugins -const ( - SQLAuthPluginNative = "mysql_native_password" - SQLAuthPluginCachingSHA2 = "caching_sha2_password" -) - -// Redis eviction policies supported by the managed Redis product. -const ( - EvictionPolicyNoEviction = "noeviction" - EvictionPolicyAllKeysLRU = "allkeys_lru" - EvictionPolicyAllKeysRandom = "allkeys_random" - EvictionPolicyVolatileLRU = "volatile_lru" - EvictionPolicyVolatileRandom = "volatile_random" - EvictionPolicyVolatileTTL = "volatile_ttl" -) - -// evictionPolicyMap is used to normalize the eviction policy string in requests -// to the advanced Redis configuration endpoint from the consts used with SetEvictionPolicy. -var evictionPolicyMap = map[string]string{ - EvictionPolicyAllKeysLRU: "allkeys-lru", - EvictionPolicyAllKeysRandom: "allkeys-random", - EvictionPolicyVolatileLRU: "volatile-lru", - EvictionPolicyVolatileRandom: "volatile-random", - EvictionPolicyVolatileTTL: "volatile-ttl", -} - -// The DatabasesService provides access to the DigitalOcean managed database -// suite of products through the public API. Customers can create new database -// clusters, migrate them between regions, create replicas and interact with -// their configurations. Each database service is referred to as a Database. A -// SQL database service can have multiple databases residing in the system. To -// help make these entities distinct from Databases in godo, we refer to them -// here as DatabaseDBs. 
-// -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Databases -type DatabasesService interface { - List(context.Context, *ListOptions) ([]Database, *Response, error) - Get(context.Context, string) (*Database, *Response, error) - GetCA(context.Context, string) (*DatabaseCA, *Response, error) - Create(context.Context, *DatabaseCreateRequest) (*Database, *Response, error) - Delete(context.Context, string) (*Response, error) - Resize(context.Context, string, *DatabaseResizeRequest) (*Response, error) - Migrate(context.Context, string, *DatabaseMigrateRequest) (*Response, error) - UpdateMaintenance(context.Context, string, *DatabaseUpdateMaintenanceRequest) (*Response, error) - ListBackups(context.Context, string, *ListOptions) ([]DatabaseBackup, *Response, error) - GetUser(context.Context, string, string) (*DatabaseUser, *Response, error) - ListUsers(context.Context, string, *ListOptions) ([]DatabaseUser, *Response, error) - CreateUser(context.Context, string, *DatabaseCreateUserRequest) (*DatabaseUser, *Response, error) - UpdateUser(context.Context, string, string, *DatabaseUpdateUserRequest) (*DatabaseUser, *Response, error) - DeleteUser(context.Context, string, string) (*Response, error) - ResetUserAuth(context.Context, string, string, *DatabaseResetUserAuthRequest) (*DatabaseUser, *Response, error) - ListDBs(context.Context, string, *ListOptions) ([]DatabaseDB, *Response, error) - CreateDB(context.Context, string, *DatabaseCreateDBRequest) (*DatabaseDB, *Response, error) - GetDB(context.Context, string, string) (*DatabaseDB, *Response, error) - DeleteDB(context.Context, string, string) (*Response, error) - ListPools(context.Context, string, *ListOptions) ([]DatabasePool, *Response, error) - CreatePool(context.Context, string, *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error) - GetPool(context.Context, string, string) (*DatabasePool, *Response, error) - DeletePool(context.Context, string, string) (*Response, error) - 
UpdatePool(context.Context, string, string, *DatabaseUpdatePoolRequest) (*Response, error) - GetReplica(context.Context, string, string) (*DatabaseReplica, *Response, error) - ListReplicas(context.Context, string, *ListOptions) ([]DatabaseReplica, *Response, error) - CreateReplica(context.Context, string, *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error) - DeleteReplica(context.Context, string, string) (*Response, error) - PromoteReplicaToPrimary(context.Context, string, string) (*Response, error) - GetEvictionPolicy(context.Context, string) (string, *Response, error) - SetEvictionPolicy(context.Context, string, string) (*Response, error) - GetSQLMode(context.Context, string) (string, *Response, error) - SetSQLMode(context.Context, string, ...string) (*Response, error) - GetFirewallRules(context.Context, string) ([]DatabaseFirewallRule, *Response, error) - UpdateFirewallRules(context.Context, string, *DatabaseUpdateFirewallRulesRequest) (*Response, error) - GetPostgreSQLConfig(context.Context, string) (*PostgreSQLConfig, *Response, error) - GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error) - GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error) - UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error) - UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error) - UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) - ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) - UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) - ListTopics(context.Context, string, *ListOptions) ([]DatabaseTopic, *Response, error) - CreateTopic(context.Context, string, *DatabaseCreateTopicRequest) (*DatabaseTopic, *Response, error) - GetTopic(context.Context, string, string) (*DatabaseTopic, *Response, error) - DeleteTopic(context.Context, string, string) (*Response, error) - UpdateTopic(context.Context, 
string, string, *DatabaseUpdateTopicRequest) (*Response, error) - GetMetricsCredentials(context.Context) (*DatabaseMetricsCredentials, *Response, error) - UpdateMetricsCredentials(context.Context, *DatabaseUpdateMetricsCredentialsRequest) (*Response, error) - ListDatabaseEvents(context.Context, string, *ListOptions) ([]DatabaseEvent, *Response, error) -} - -// DatabasesServiceOp handles communication with the Databases related methods -// of the DigitalOcean API. -type DatabasesServiceOp struct { - client *Client -} - -var _ DatabasesService = &DatabasesServiceOp{} - -// Database represents a DigitalOcean managed database product. These managed databases -// are usually comprised of a cluster of database nodes, a primary and 0 or more replicas. -// The EngineSlug is a string which indicates the type of database service. Some examples are -// "pg", "mysql" or "redis". A Database also includes connection information and other -// properties of the service like region, size and current status. 
-type Database struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - EngineSlug string `json:"engine,omitempty"` - VersionSlug string `json:"version,omitempty"` - Connection *DatabaseConnection `json:"connection,omitempty"` - UIConnection *DatabaseConnection `json:"ui_connection,omitempty"` - PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"` - StandbyConnection *DatabaseConnection `json:"standby_connection,omitempty"` - StandbyPrivateConnection *DatabaseConnection `json:"standby_private_connection,omitempty"` - Users []DatabaseUser `json:"users,omitempty"` - NumNodes int `json:"num_nodes,omitempty"` - SizeSlug string `json:"size,omitempty"` - DBNames []string `json:"db_names,omitempty"` - RegionSlug string `json:"region,omitempty"` - Status string `json:"status,omitempty"` - MaintenanceWindow *DatabaseMaintenanceWindow `json:"maintenance_window,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - PrivateNetworkUUID string `json:"private_network_uuid,omitempty"` - Tags []string `json:"tags,omitempty"` - ProjectID string `json:"project_id,omitempty"` - StorageSizeMib uint64 `json:"storage_size_mib,omitempty"` - MetricsEndpoints []*ServiceAddress `json:"metrics_endpoints,omitempty"` -} - -// DatabaseCA represents a database ca. -type DatabaseCA struct { - Certificate []byte `json:"certificate"` -} - -// DatabaseConnection represents a database connection -type DatabaseConnection struct { - Protocol string `json:"protocol"` - URI string `json:"uri,omitempty"` - Database string `json:"database,omitempty"` - Host string `json:"host,omitempty"` - Port int `json:"port,omitempty"` - User string `json:"user,omitempty"` - Password string `json:"password,omitempty"` - SSL bool `json:"ssl,omitempty"` - ApplicationPorts map[string]uint32 `json:"application_ports,omitempty"` -} - -// ServiceAddress represents a host:port for a generic service (e.g. 
metrics endpoint) -type ServiceAddress struct { - Host string `json:"host"` - Port int `json:"port"` -} - -// DatabaseUser represents a user in the database -type DatabaseUser struct { - Name string `json:"name,omitempty"` - Role string `json:"role,omitempty"` - Password string `json:"password,omitempty"` - AccessCert string `json:"access_cert,omitempty"` - AccessKey string `json:"access_key,omitempty"` - MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"` - Settings *DatabaseUserSettings `json:"settings,omitempty"` -} - -// KafkaACL contains Kafka specific user access control information -type KafkaACL struct { - ID string `json:"id,omitempty"` - Permission string `json:"permission,omitempty"` - Topic string `json:"topic,omitempty"` -} - -// DatabaseUserSettings contains Kafka-specific user settings -type DatabaseUserSettings struct { - ACL []*KafkaACL `json:"acl,omitempty"` -} - -// DatabaseMySQLUserSettings contains MySQL-specific user settings -type DatabaseMySQLUserSettings struct { - AuthPlugin string `json:"auth_plugin"` -} - -// DatabaseMaintenanceWindow represents the maintenance_window of a database -// cluster -type DatabaseMaintenanceWindow struct { - Day string `json:"day,omitempty"` - Hour string `json:"hour,omitempty"` - Pending bool `json:"pending,omitempty"` - Description []string `json:"description,omitempty"` -} - -// DatabaseBackup represents a database backup. -type DatabaseBackup struct { - CreatedAt time.Time `json:"created_at,omitempty"` - SizeGigabytes float64 `json:"size_gigabytes,omitempty"` -} - -// DatabaseBackupRestore contains information needed to restore a backup. 
-type DatabaseBackupRestore struct { - DatabaseName string `json:"database_name,omitempty"` - BackupCreatedAt string `json:"backup_created_at,omitempty"` -} - -// DatabaseCreateRequest represents a request to create a database cluster -type DatabaseCreateRequest struct { - Name string `json:"name,omitempty"` - EngineSlug string `json:"engine,omitempty"` - Version string `json:"version,omitempty"` - SizeSlug string `json:"size,omitempty"` - Region string `json:"region,omitempty"` - NumNodes int `json:"num_nodes,omitempty"` - PrivateNetworkUUID string `json:"private_network_uuid"` - Tags []string `json:"tags,omitempty"` - BackupRestore *DatabaseBackupRestore `json:"backup_restore,omitempty"` - ProjectID string `json:"project_id"` - StorageSizeMib uint64 `json:"storage_size_mib,omitempty"` -} - -// DatabaseResizeRequest can be used to initiate a database resize operation. -type DatabaseResizeRequest struct { - SizeSlug string `json:"size,omitempty"` - NumNodes int `json:"num_nodes,omitempty"` - StorageSizeMib uint64 `json:"storage_size_mib,omitempty"` -} - -// DatabaseMigrateRequest can be used to initiate a database migrate operation. -type DatabaseMigrateRequest struct { - Region string `json:"region,omitempty"` - PrivateNetworkUUID string `json:"private_network_uuid"` -} - -// DatabaseUpdateMaintenanceRequest can be used to update the database's maintenance window. -type DatabaseUpdateMaintenanceRequest struct { - Day string `json:"day,omitempty"` - Hour string `json:"hour,omitempty"` -} - -// DatabaseDB represents an engine-specific database created within a database cluster. For SQL -// databases like PostgreSQL or MySQL, a "DB" refers to a database created on the RDBMS. For instance, -// a PostgreSQL database server can contain many database schemas, each with its own settings, access -// permissions and data. ListDBs will return all databases present on the server. 
-type DatabaseDB struct { - Name string `json:"name"` -} - -// DatabaseTopic represents a Kafka topic -type DatabaseTopic struct { - Name string `json:"name"` - Partitions []*TopicPartition `json:"partitions,omitempty"` - ReplicationFactor *uint32 `json:"replication_factor,omitempty"` - State string `json:"state,omitempty"` - Config *TopicConfig `json:"config,omitempty"` -} - -// TopicPartition represents the state of a Kafka topic partition -type TopicPartition struct { - EarliestOffset uint64 `json:"earliest_offset,omitempty"` - InSyncReplicas uint32 `json:"in_sync_replicas,omitempty"` - Id uint32 `json:"id,omitempty"` - Size uint64 `json:"size,omitempty"` - ConsumerGroups []*TopicConsumerGroup `json:"consumer_groups,omitempty"` -} - -// TopicConsumerGroup represents a consumer group for a particular Kafka topic -type TopicConsumerGroup struct { - Name string `json:"name,omitempty"` - Offset uint64 `json:"offset,omitempty"` -} - -// TopicConfig represents all configurable options for a Kafka topic -type TopicConfig struct { - CleanupPolicy string `json:"cleanup_policy,omitempty"` - CompressionType string `json:"compression_type,omitempty"` - DeleteRetentionMS *uint64 `json:"delete_retention_ms,omitempty"` - FileDeleteDelayMS *uint64 `json:"file_delete_delay_ms,omitempty"` - FlushMessages *uint64 `json:"flush_messages,omitempty"` - FlushMS *uint64 `json:"flush_ms,omitempty"` - IndexIntervalBytes *uint64 `json:"index_interval_bytes,omitempty"` - MaxCompactionLagMS *uint64 `json:"max_compaction_lag_ms,omitempty"` - MaxMessageBytes *uint64 `json:"max_message_bytes,omitempty"` - MessageDownConversionEnable *bool `json:"message_down_conversion_enable,omitempty"` - MessageFormatVersion string `json:"message_format_version,omitempty"` - MessageTimestampDifferenceMaxMS *uint64 `json:"message_timestamp_difference_max_ms,omitempty"` - MessageTimestampType string `json:"message_timestamp_type,omitempty"` - MinCleanableDirtyRatio *float32 
`json:"min_cleanable_dirty_ratio,omitempty"` - MinCompactionLagMS *uint64 `json:"min_compaction_lag_ms,omitempty"` - MinInsyncReplicas *uint32 `json:"min_insync_replicas,omitempty"` - Preallocate *bool `json:"preallocate,omitempty"` - RetentionBytes *int64 `json:"retention_bytes,omitempty"` - RetentionMS *int64 `json:"retention_ms,omitempty"` - SegmentBytes *uint64 `json:"segment_bytes,omitempty"` - SegmentIndexBytes *uint64 `json:"segment_index_bytes,omitempty"` - SegmentJitterMS *uint64 `json:"segment_jitter_ms,omitempty"` - SegmentMS *uint64 `json:"segment_ms,omitempty"` -} - -// DatabaseCreateTopicRequest is used to create a new topic within a kafka cluster -type DatabaseCreateTopicRequest struct { - Name string `json:"name"` - PartitionCount *uint32 `json:"partition_count,omitempty"` - ReplicationFactor *uint32 `json:"replication_factor,omitempty"` - Config *TopicConfig `json:"config,omitempty"` -} - -// DatabaseUpdateTopicRequest ... -type DatabaseUpdateTopicRequest struct { - PartitionCount *uint32 `json:"partition_count,omitempty"` - ReplicationFactor *uint32 `json:"replication_factor,omitempty"` - Config *TopicConfig `json:"config,omitempty"` -} - -// DatabaseReplica represents a read-only replica of a particular database -type DatabaseReplica struct { - ID string `json:"id"` - Name string `json:"name"` - Connection *DatabaseConnection `json:"connection"` - PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"` - Region string `json:"region"` - Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - PrivateNetworkUUID string `json:"private_network_uuid,omitempty"` - Tags []string `json:"tags,omitempty"` - StorageSizeMib uint64 `json:"storage_size_mib,omitempty"` -} - -// DatabasePool represents a database connection pool -type DatabasePool struct { - User string `json:"user"` - Name string `json:"name"` - Size int `json:"size"` - Database string `json:"db"` - Mode string `json:"mode"` - Connection *DatabaseConnection 
`json:"connection"`
	PrivateConnection        *DatabaseConnection `json:"private_connection,omitempty"`
	StandbyConnection        *DatabaseConnection `json:"standby_connection,omitempty"`
	StandbyPrivateConnection *DatabaseConnection `json:"standby_private_connection,omitempty"`
}

// DatabaseCreatePoolRequest is used to create a new database connection pool.
type DatabaseCreatePoolRequest struct {
	User     string `json:"user"`
	Name     string `json:"name"`
	Size     int    `json:"size"`
	Database string `json:"db"`
	Mode     string `json:"mode"`
}

// DatabaseUpdatePoolRequest is used to update a database connection pool.
// Size, Database, and Mode are required by UpdatePool (validated there);
// User may be omitted.
type DatabaseUpdatePoolRequest struct {
	User     string `json:"user,omitempty"`
	Size     int    `json:"size"`
	Database string `json:"db"`
	Mode     string `json:"mode"`
}

// DatabaseCreateUserRequest is used to create a new database user.
// MySQLSettings applies only to MySQL clusters; Settings is engine-generic.
type DatabaseCreateUserRequest struct {
	Name          string                     `json:"name"`
	MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
	Settings      *DatabaseUserSettings      `json:"settings,omitempty"`
}

// DatabaseUpdateUserRequest is used to update an existing database user.
type DatabaseUpdateUserRequest struct {
	Settings *DatabaseUserSettings `json:"settings,omitempty"`
}

// DatabaseResetUserAuthRequest is used to reset a user's DB auth.
type DatabaseResetUserAuthRequest struct {
	MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
	Settings      *DatabaseUserSettings      `json:"settings,omitempty"`
}

// DatabaseCreateDBRequest is used to create a new engine-specific database within the cluster.
type DatabaseCreateDBRequest struct {
	Name string `json:"name"`
}

// DatabaseCreateReplicaRequest is used to create a new read-only replica.
type DatabaseCreateReplicaRequest struct {
	Name               string   `json:"name"`
	Region             string   `json:"region"`
	Size               string   `json:"size"`
	PrivateNetworkUUID string   `json:"private_network_uuid"`
	Tags               []string `json:"tags,omitempty"`
	StorageSizeMib     uint64   `json:"storage_size_mib,omitempty"`
}

// DatabaseUpdateFirewallRulesRequest is used to set the firewall rules for a database.
type DatabaseUpdateFirewallRulesRequest struct {
	Rules []*DatabaseFirewallRule `json:"rules"`
}

// DatabaseFirewallRule is a rule describing an inbound source to a database.
type DatabaseFirewallRule struct {
	UUID        string    `json:"uuid"`
	ClusterUUID string    `json:"cluster_uuid"`
	Type        string    `json:"type"`
	Value       string    `json:"value"`
	CreatedAt   time.Time `json:"created_at"`
}

// PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters.
// All fields are pointers so that unset values are omitted from the JSON
// payload rather than sent as zero values.
type PostgreSQLConfig struct {
	AutovacuumFreezeMaxAge          *int     `json:"autovacuum_freeze_max_age,omitempty"`
	AutovacuumMaxWorkers            *int     `json:"autovacuum_max_workers,omitempty"`
	AutovacuumNaptime               *int     `json:"autovacuum_naptime,omitempty"`
	AutovacuumVacuumThreshold       *int     `json:"autovacuum_vacuum_threshold,omitempty"`
	AutovacuumAnalyzeThreshold      *int     `json:"autovacuum_analyze_threshold,omitempty"`
	AutovacuumVacuumScaleFactor     *float32 `json:"autovacuum_vacuum_scale_factor,omitempty"`
	AutovacuumAnalyzeScaleFactor    *float32 `json:"autovacuum_analyze_scale_factor,omitempty"`
	AutovacuumVacuumCostDelay       *int     `json:"autovacuum_vacuum_cost_delay,omitempty"`
	AutovacuumVacuumCostLimit       *int     `json:"autovacuum_vacuum_cost_limit,omitempty"`
	BGWriterDelay                   *int     `json:"bgwriter_delay,omitempty"`
	BGWriterFlushAfter              *int     `json:"bgwriter_flush_after,omitempty"`
	BGWriterLRUMaxpages             *int     `json:"bgwriter_lru_maxpages,omitempty"`
	BGWriterLRUMultiplier           *float32 `json:"bgwriter_lru_multiplier,omitempty"`
	// Field name suggests milliseconds; the wire name is plain "deadlock_timeout" — confirm units against the API docs.
	DeadlockTimeoutMillis           *int     `json:"deadlock_timeout,omitempty"`
	DefaultToastCompression         *string  `json:"default_toast_compression,omitempty"`
	IdleInTransactionSessionTimeout *int     `json:"idle_in_transaction_session_timeout,omitempty"`
	JIT                             *bool    `json:"jit,omitempty"`
	LogAutovacuumMinDuration        *int     `json:"log_autovacuum_min_duration,omitempty"`
	LogErrorVerbosity               *string  `json:"log_error_verbosity,omitempty"`
	LogLinePrefix                   *string  `json:"log_line_prefix,omitempty"`
	LogMinDurationStatement         *int     `json:"log_min_duration_statement,omitempty"`
	MaxFilesPerProcess              *int     `json:"max_files_per_process,omitempty"`
	MaxPreparedTransactions         *int     `json:"max_prepared_transactions,omitempty"`
	MaxPredLocksPerTransaction      *int     `json:"max_pred_locks_per_transaction,omitempty"`
	MaxLocksPerTransaction          *int     `json:"max_locks_per_transaction,omitempty"`
	MaxStackDepth                   *int     `json:"max_stack_depth,omitempty"`
	MaxStandbyArchiveDelay          *int     `json:"max_standby_archive_delay,omitempty"`
	MaxStandbyStreamingDelay        *int     `json:"max_standby_streaming_delay,omitempty"`
	MaxReplicationSlots             *int     `json:"max_replication_slots,omitempty"`
	MaxLogicalReplicationWorkers    *int     `json:"max_logical_replication_workers,omitempty"`
	MaxParallelWorkers              *int     `json:"max_parallel_workers,omitempty"`
	MaxParallelWorkersPerGather     *int     `json:"max_parallel_workers_per_gather,omitempty"`
	MaxWorkerProcesses              *int     `json:"max_worker_processes,omitempty"`
	PGPartmanBGWRole                *string  `json:"pg_partman_bgw.role,omitempty"`
	PGPartmanBGWInterval            *int     `json:"pg_partman_bgw.interval,omitempty"`
	PGStatStatementsTrack           *string  `json:"pg_stat_statements.track,omitempty"`
	TempFileLimit                   *int     `json:"temp_file_limit,omitempty"`
	Timezone                        *string  `json:"timezone,omitempty"`
	TrackActivityQuerySize          *int     `json:"track_activity_query_size,omitempty"`
	TrackCommitTimestamp            *string  `json:"track_commit_timestamp,omitempty"`
	TrackFunctions                  *string  `json:"track_functions,omitempty"`
	TrackIOTiming                   *string  `json:"track_io_timing,omitempty"`
	MaxWalSenders                   *int     `json:"max_wal_senders,omitempty"`
	WalSenderTimeout                *int     `json:"wal_sender_timeout,omitempty"`
	WalWriterDelay                  *int     `json:"wal_writer_delay,omitempty"`
	SharedBuffersPercentage         *float32 `json:"shared_buffers_percentage,omitempty"`
	PgBouncer                       *PostgreSQLBouncerConfig `json:"pgbouncer,omitempty"`
	BackupHour                      *int     `json:"backup_hour,omitempty"`
	BackupMinute                    *int     `json:"backup_minute,omitempty"`
	WorkMem                         *int     `json:"work_mem,omitempty"`
	TimeScaleDB                     *PostgreSQLTimeScaleDBConfig `json:"timescaledb,omitempty"`
}

// PostgreSQLBouncerConfig holds PgBouncer configuration for a PostgreSQL cluster.
type PostgreSQLBouncerConfig struct {
	ServerResetQueryAlways  *bool     `json:"server_reset_query_always,omitempty"`
	IgnoreStartupParameters *[]string `json:"ignore_startup_parameters,omitempty"`
	MinPoolSize             *int      `json:"min_pool_size,omitempty"`
	ServerLifetime          *int      `json:"server_lifetime,omitempty"`
	ServerIdleTimeout       *int      `json:"server_idle_timeout,omitempty"`
	AutodbPoolSize          *int      `json:"autodb_pool_size,omitempty"`
	AutodbPoolMode          *string   `json:"autodb_pool_mode,omitempty"`
	AutodbMaxDbConnections  *int      `json:"autodb_max_db_connections,omitempty"`
	AutodbIdleTimeout       *int      `json:"autodb_idle_timeout,omitempty"`
}

// PostgreSQLTimeScaleDBConfig holds TimescaleDB configuration for a PostgreSQL cluster.
type PostgreSQLTimeScaleDBConfig struct {
	MaxBackgroundWorkers *int `json:"max_background_workers,omitempty"`
}

// RedisConfig holds advanced configurations for Redis database clusters.
// All fields are pointers so that unset values are omitted from the JSON payload.
type RedisConfig struct {
	// Accepts dash-separated policy names; underscore-separated values are
	// normalized by UpdateRedisConfig before being sent.
	RedisMaxmemoryPolicy               *string `json:"redis_maxmemory_policy,omitempty"`
	RedisPubsubClientOutputBufferLimit *int    `json:"redis_pubsub_client_output_buffer_limit,omitempty"`
	RedisNumberOfDatabases             *int    `json:"redis_number_of_databases,omitempty"`
	RedisIOThreads                     *int    `json:"redis_io_threads,omitempty"`
	RedisLFULogFactor                  *int    `json:"redis_lfu_log_factor,omitempty"`
	RedisLFUDecayTime                  *int    `json:"redis_lfu_decay_time,omitempty"`
	RedisSSL                           *bool   `json:"redis_ssl,omitempty"`
	RedisTimeout                       *int    `json:"redis_timeout,omitempty"`
	RedisNotifyKeyspaceEvents          *string `json:"redis_notify_keyspace_events,omitempty"`
	RedisPersistence                   *string `json:"redis_persistence,omitempty"`
	RedisACLChannelsDefault            *string `json:"redis_acl_channels_default,omitempty"`
}

// MySQLConfig holds advanced configurations for MySQL database clusters.
// All fields are pointers so that unset values are omitted from the JSON payload.
type MySQLConfig struct {
	ConnectTimeout               *int     `json:"connect_timeout,omitempty"`
	DefaultTimeZone              *string  `json:"default_time_zone,omitempty"`
	InnodbLogBufferSize          *int     `json:"innodb_log_buffer_size,omitempty"`
	InnodbOnlineAlterLogMaxSize  *int     `json:"innodb_online_alter_log_max_size,omitempty"`
	InnodbLockWaitTimeout        *int     `json:"innodb_lock_wait_timeout,omitempty"`
	InteractiveTimeout           *int     `json:"interactive_timeout,omitempty"`
	MaxAllowedPacket             *int     `json:"max_allowed_packet,omitempty"`
	NetReadTimeout               *int     `json:"net_read_timeout,omitempty"`
	SortBufferSize               *int     `json:"sort_buffer_size,omitempty"`
	SQLMode                      *string  `json:"sql_mode,omitempty"`
	SQLRequirePrimaryKey         *bool    `json:"sql_require_primary_key,omitempty"`
	WaitTimeout                  *int     `json:"wait_timeout,omitempty"`
	NetWriteTimeout              *int     `json:"net_write_timeout,omitempty"`
	GroupConcatMaxLen            *int     `json:"group_concat_max_len,omitempty"`
	InformationSchemaStatsExpiry *int     `json:"information_schema_stats_expiry,omitempty"`
	InnodbFtMinTokenSize         *int     `json:"innodb_ft_min_token_size,omitempty"`
	InnodbFtServerStopwordTable  *string  `json:"innodb_ft_server_stopword_table,omitempty"`
	InnodbPrintAllDeadlocks      *bool    `json:"innodb_print_all_deadlocks,omitempty"`
	InnodbRollbackOnTimeout      *bool    `json:"innodb_rollback_on_timeout,omitempty"`
	InternalTmpMemStorageEngine  *string  `json:"internal_tmp_mem_storage_engine,omitempty"`
	MaxHeapTableSize             *int     `json:"max_heap_table_size,omitempty"`
	TmpTableSize                 *int     `json:"tmp_table_size,omitempty"`
	SlowQueryLog                 *bool    `json:"slow_query_log,omitempty"`
	LongQueryTime                *float32 `json:"long_query_time,omitempty"`
	BackupHour                   *int     `json:"backup_hour,omitempty"`
	BackupMinute                 *int     `json:"backup_minute,omitempty"`
	BinlogRetentionPeriod        *int     `json:"binlog_retention_period,omitempty"`
}

// The unexported *Root types below wrap API responses/requests in the
// top-level JSON envelope each endpoint uses.

// databaseUserRoot wraps a single database user.
type databaseUserRoot struct {
	User *DatabaseUser `json:"user"`
}

// databaseUsersRoot wraps a list of database users.
type databaseUsersRoot struct {
	Users []DatabaseUser `json:"users"`
}

// databaseDBRoot wraps a single engine-level database.
type databaseDBRoot struct {
	DB *DatabaseDB `json:"db"`
}

// databaseDBsRoot wraps a list of engine-level databases.
type databaseDBsRoot struct {
	DBs []DatabaseDB `json:"dbs"`
}

// databasesRoot wraps a list of database clusters.
type databasesRoot struct {
	Databases []Database `json:"databases"`
}

// databaseRoot wraps a single database cluster.
type databaseRoot struct {
	Database *Database `json:"database"`
}

// databaseCARoot wraps a cluster CA certificate.
type databaseCARoot struct {
	CA *DatabaseCA `json:"ca"`
}

// databasePostgreSQLConfigRoot wraps a PostgreSQL advanced configuration.
type databasePostgreSQLConfigRoot struct {
	Config *PostgreSQLConfig `json:"config"`
}

// databaseRedisConfigRoot wraps a Redis advanced configuration.
type databaseRedisConfigRoot struct {
	Config *RedisConfig `json:"config"`
}

// databaseMySQLConfigRoot wraps a MySQL advanced configuration.
type databaseMySQLConfigRoot struct {
	Config *MySQLConfig `json:"config"`
}

// databaseBackupsRoot wraps a list of backups.
type databaseBackupsRoot struct {
	Backups []DatabaseBackup `json:"backups"`
}

// databasePoolRoot wraps a single connection pool.
type databasePoolRoot struct {
	Pool *DatabasePool `json:"pool"`
}

// databasePoolsRoot wraps a list of connection pools.
type databasePoolsRoot struct {
	Pools []DatabasePool `json:"pools"`
}

// databaseReplicaRoot wraps a single read-only replica.
type databaseReplicaRoot struct {
	Replica *DatabaseReplica `json:"replica"`
}

// databaseReplicasRoot wraps a list of read-only replicas.
type databaseReplicasRoot struct {
	Replicas []DatabaseReplica `json:"replicas"`
}

// evictionPolicyRoot wraps a Redis eviction policy; it is used for both
// the GET response and the PUT request body.
type evictionPolicyRoot struct {
	EvictionPolicy string `json:"eviction_policy"`
}

// UpgradeVersionRequest is used to upgrade the major version of a cluster.
type UpgradeVersionRequest struct {
	Version string `json:"version"`
}

// sqlModeRoot wraps a MySQL SQL mode string; it is used for both
// the GET response and the PUT request body.
type sqlModeRoot struct {
	SQLMode string `json:"sql_mode"`
}

// databaseFirewallRuleRoot wraps a list of firewall rules.
type databaseFirewallRuleRoot struct {
	Rules []DatabaseFirewallRule `json:"rules"`
}

// databaseOptionsRoot represents the root of all available database options (i.e. engines, regions, version, etc.)
-type databaseOptionsRoot struct { - Options *DatabaseOptions `json:"options"` -} - -type databaseTopicRoot struct { - Topic *DatabaseTopic `json:"topic"` -} - -type databaseTopicsRoot struct { - Topics []DatabaseTopic `json:"topics"` -} - -type databaseMetricsCredentialsRoot struct { - Credentials *DatabaseMetricsCredentials `json:"credentials"` -} - -type DatabaseMetricsCredentials struct { - BasicAuthUsername string `json:"basic_auth_username"` - BasicAuthPassword string `json:"basic_auth_password"` -} - -type DatabaseUpdateMetricsCredentialsRequest struct { - Credentials *DatabaseMetricsCredentials `json:"credentials"` -} - -// DatabaseOptions represents the available database engines -type DatabaseOptions struct { - MongoDBOptions DatabaseEngineOptions `json:"mongodb"` - MySQLOptions DatabaseEngineOptions `json:"mysql"` - PostgresSQLOptions DatabaseEngineOptions `json:"pg"` - RedisOptions DatabaseEngineOptions `json:"redis"` - KafkaOptions DatabaseEngineOptions `json:"kafka"` - OpensearchOptions DatabaseEngineOptions `json:"opensearch"` -} - -// DatabaseEngineOptions represents the configuration options that are available for a given database engine -type DatabaseEngineOptions struct { - Regions []string `json:"regions"` - Versions []string `json:"versions"` - Layouts []DatabaseLayout `json:"layouts"` -} - -// DatabaseLayout represents the slugs available for a given database engine at various node counts -type DatabaseLayout struct { - NodeNum int `json:"num_nodes"` - Sizes []string `json:"sizes"` -} - -// ListDatabaseEvents contains a list of project events. -type ListDatabaseEvents struct { - Events []DatabaseEvent `json:"events"` -} - -// DatbaseEvent contains the information about a Datbase event. 
-type DatabaseEvent struct { - ID string `json:"id"` - ServiceName string `json:"cluster_name"` - EventType string `json:"event_type"` - CreateTime string `json:"create_time"` -} - -type ListDatabaseEventsRoot struct { - Events []DatabaseEvent `json:"events"` -} - -// URN returns a URN identifier for the database -func (d Database) URN() string { - return ToURN("dbaas", d.ID) -} - -// List returns a list of the Databases visible with the caller's API token -func (svc *DatabasesServiceOp) List(ctx context.Context, opts *ListOptions) ([]Database, *Response, error) { - path := databaseBasePath - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databasesRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Databases, resp, nil -} - -// Get retrieves the details of a database cluster -func (svc *DatabasesServiceOp) Get(ctx context.Context, databaseID string) (*Database, *Response, error) { - path := fmt.Sprintf(databaseSinglePath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databaseRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Database, resp, nil -} - -// GetCA retrieves the CA of a database cluster. 
func (svc *DatabasesServiceOp) GetCA(ctx context.Context, databaseID string) (*DatabaseCA, *Response, error) {
	path := fmt.Sprintf(databaseCAPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseCARoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.CA, resp, nil
}

// Create creates a database cluster
func (svc *DatabasesServiceOp) Create(ctx context.Context, create *DatabaseCreateRequest) (*Database, *Response, error) {
	path := databaseBasePath
	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Database, resp, nil
}

// Delete deletes a database cluster. There is no way to recover a cluster once
// it has been destroyed.
func (svc *DatabasesServiceOp) Delete(ctx context.Context, databaseID string) (*Response, error) {
	path := fmt.Sprintf("%s/%s", databaseBasePath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// Resize resizes a database cluster by number of nodes or size
func (svc *DatabasesServiceOp) Resize(ctx context.Context, databaseID string, resize *DatabaseResizeRequest) (*Response, error) {
	path := fmt.Sprintf(databaseResizePath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, resize)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// Migrate migrates a database cluster to a new region
func (svc *DatabasesServiceOp) Migrate(ctx context.Context, databaseID string, migrate *DatabaseMigrateRequest) (*Response, error) {
	path := fmt.Sprintf(databaseMigratePath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, migrate)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// UpdateMaintenance updates the maintenance window on a cluster
func (svc *DatabasesServiceOp) UpdateMaintenance(ctx context.Context, databaseID string, maintenance *DatabaseUpdateMaintenanceRequest) (*Response, error) {
	path := fmt.Sprintf(databaseMaintenancePath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, maintenance)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// ListBackups returns a list of the current backups of a database
func (svc *DatabasesServiceOp) ListBackups(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseBackup, *Response, error) {
	path := fmt.Sprintf(databaseBackupsPath, databaseID)
	path, err := addOptions(path, opts)
	if err != nil {
		return nil, nil, err
	}
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseBackupsRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Backups, resp, nil
}

// GetUser returns the database user identified by userID
func (svc *DatabasesServiceOp) GetUser(ctx context.Context, databaseID, userID string) (*DatabaseUser, *Response, error) {
	path := fmt.Sprintf(databaseUserPath, databaseID, userID)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseUserRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.User, resp, nil
}

// ListUsers returns all database users for the database
func (svc *DatabasesServiceOp) ListUsers(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseUser, *Response, error) {
	path := fmt.Sprintf(databaseUsersPath, databaseID)
	path, err := addOptions(path, opts)
	if err != nil {
		return nil, nil, err
	}
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseUsersRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Users, resp, nil
}

// CreateUser will create a new database user
func (svc *DatabasesServiceOp) CreateUser(ctx context.Context, databaseID string, createUser *DatabaseCreateUserRequest) (*DatabaseUser, *Response, error) {
	path := fmt.Sprintf(databaseUsersPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createUser)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseUserRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.User, resp, nil
}

// UpdateUser will update an existing database user
func (svc *DatabasesServiceOp) UpdateUser(ctx context.Context, databaseID, userID string, updateUser *DatabaseUpdateUserRequest) (*DatabaseUser, *Response, error) {
	path := fmt.Sprintf(databaseUserPath, databaseID, userID)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updateUser)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseUserRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.User, resp, nil
}

// ResetUserAuth will reset user authentication
func (svc *DatabasesServiceOp) ResetUserAuth(ctx context.Context, databaseID, userID string, resetAuth *DatabaseResetUserAuthRequest) (*DatabaseUser, *Response, error) {
	path := fmt.Sprintf(databaseResetUserAuthPath, databaseID, userID)
	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, resetAuth)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseUserRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.User, resp, nil
}

// DeleteUser will delete an existing database user
func (svc *DatabasesServiceOp) DeleteUser(ctx context.Context, databaseID, userID string) (*Response, error) {
	path := fmt.Sprintf(databaseUserPath, databaseID, userID)
	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// ListDBs returns all databases for a given database cluster
func (svc *DatabasesServiceOp) ListDBs(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseDB, *Response, error) {
	path := fmt.Sprintf(databaseDBsPath, databaseID)
	path, err := addOptions(path, opts)
	if err != nil {
		return nil, nil, err
	}
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseDBsRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.DBs, resp, nil
}

// GetDB returns a single database by name
func (svc *DatabasesServiceOp) GetDB(ctx context.Context, databaseID, name string) (*DatabaseDB, *Response, error) {
	path := fmt.Sprintf(databaseDBPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseDBRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.DB, resp, nil
}

// CreateDB will create a new database
func (svc *DatabasesServiceOp) CreateDB(ctx context.Context, databaseID string, createDB *DatabaseCreateDBRequest) (*DatabaseDB, *Response, error) {
	path := fmt.Sprintf(databaseDBsPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createDB)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseDBRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.DB, resp, nil
}

// DeleteDB will delete an existing database
func (svc *DatabasesServiceOp) DeleteDB(ctx context.Context, databaseID, name string) (*Response, error) {
	path := fmt.Sprintf(databaseDBPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// ListPools returns all connection pools for a given database cluster
func (svc *DatabasesServiceOp) ListPools(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabasePool, *Response, error) {
	path := fmt.Sprintf(databasePoolsPath, databaseID)
	path, err := addOptions(path, opts)
	if err != nil {
		return nil, nil, err
	}
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databasePoolsRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Pools, resp, nil
}

// GetPool returns a single database connection pool by name
func (svc *DatabasesServiceOp) GetPool(ctx context.Context, databaseID, name string) (*DatabasePool, *Response, error) {
	path := fmt.Sprintf(databasePoolPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databasePoolRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Pool, resp, nil
}

// CreatePool will create a new database connection pool
func (svc *DatabasesServiceOp) CreatePool(ctx context.Context, databaseID string, createPool *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error) {
	path := fmt.Sprintf(databasePoolsPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createPool)
	if err != nil {
		return nil, nil, err
	}
	root := new(databasePoolRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Pool, resp, nil
}

// DeletePool will delete an existing database connection pool
func (svc *DatabasesServiceOp) DeletePool(ctx context.Context, databaseID, name string) (*Response, error) {
	path := fmt.Sprintf(databasePoolPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// UpdatePool will update an existing database connection pool.
// Unlike the other mutators, it validates the request client-side: Mode and
// Database must be non-empty and Size must be at least 1.
func (svc *DatabasesServiceOp) UpdatePool(ctx context.Context, databaseID, name string, updatePool *DatabaseUpdatePoolRequest) (*Response, error) {
	path := fmt.Sprintf(databasePoolPath, databaseID, name)

	if updatePool == nil {
		return nil, NewArgError("updatePool", "cannot be nil")
	}

	if updatePool.Mode == "" {
		return nil, NewArgError("mode", "cannot be empty")
	}

	if updatePool.Database == "" {
		return nil, NewArgError("database", "cannot be empty")
	}

	if updatePool.Size < 1 {
		return nil, NewArgError("size", "cannot be less than 1")
	}

	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updatePool)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// GetReplica returns a single database replica
func (svc *DatabasesServiceOp) GetReplica(ctx context.Context, databaseID, name string) (*DatabaseReplica, *Response, error) {
	path := fmt.Sprintf(databaseReplicaPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseReplicaRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Replica, resp, nil
}

// ListReplicas returns all read-only replicas for a given database cluster
func (svc *DatabasesServiceOp) ListReplicas(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseReplica, *Response, error) {
	path := fmt.Sprintf(databaseReplicasPath, databaseID)
	path, err := addOptions(path, opts)
	if err != nil {
		return nil, nil, err
	}
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseReplicasRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Replicas, resp, nil
}

// CreateReplica will create a new read-only replica for a database cluster
func (svc *DatabasesServiceOp) CreateReplica(ctx context.Context, databaseID string, createReplica *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error) {
	path := fmt.Sprintf(databaseReplicasPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createReplica)
	if err != nil {
		return nil, nil, err
	}
	root := new(databaseReplicaRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Replica, resp, nil
}

// DeleteReplica will delete an existing database replica
func (svc *DatabasesServiceOp) DeleteReplica(ctx context.Context, databaseID, name string) (*Response, error) {
	path := fmt.Sprintf(databaseReplicaPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// PromoteReplicaToPrimary will sever the read replica integration and then promote the replica cluster to be a R/W cluster
func (svc *DatabasesServiceOp) PromoteReplicaToPrimary(ctx context.Context, databaseID, name string) (*Response, error) {
	path := fmt.Sprintf(databasePromoteReplicaToPrimaryPath, databaseID, name)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, nil)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// GetEvictionPolicy loads the eviction policy for a given Redis cluster.
func (svc *DatabasesServiceOp) GetEvictionPolicy(ctx context.Context, databaseID string) (string, *Response, error) {
	path := fmt.Sprintf(databaseEvictionPolicyPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return "", nil, err
	}
	root := new(evictionPolicyRoot)
	resp, err := svc.client.Do(ctx, req, root)
	if err != nil {
		return "", resp, err
	}
	return root.EvictionPolicy, resp, nil
}

// SetEvictionPolicy updates the eviction policy for a given Redis cluster.
//
// The valid eviction policies are documented by the exported string constants
// with the prefix `EvictionPolicy`.
func (svc *DatabasesServiceOp) SetEvictionPolicy(ctx context.Context, databaseID, policy string) (*Response, error) {
	path := fmt.Sprintf(databaseEvictionPolicyPath, databaseID)
	root := &evictionPolicyRoot{EvictionPolicy: policy}
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, root)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// GetSQLMode loads the SQL Mode settings for a given MySQL cluster.
-func (svc *DatabasesServiceOp) GetSQLMode(ctx context.Context, databaseID string) (string, *Response, error) { - path := fmt.Sprintf(databaseSQLModePath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return "", nil, err - } - root := &sqlModeRoot{} - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return "", resp, err - } - return root.SQLMode, resp, nil -} - -// SetSQLMode updates the SQL Mode settings for a given MySQL cluster. -func (svc *DatabasesServiceOp) SetSQLMode(ctx context.Context, databaseID string, sqlModes ...string) (*Response, error) { - path := fmt.Sprintf(databaseSQLModePath, databaseID) - root := &sqlModeRoot{SQLMode: strings.Join(sqlModes, ",")} - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, root) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// GetFirewallRules loads the inbound sources for a given cluster. -func (svc *DatabasesServiceOp) GetFirewallRules(ctx context.Context, databaseID string) ([]DatabaseFirewallRule, *Response, error) { - path := fmt.Sprintf(databaseFirewallRulesPath, databaseID) - root := new(databaseFirewallRuleRoot) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Rules, resp, nil -} - -// UpdateFirewallRules sets the inbound sources for a given cluster. 
-func (svc *DatabasesServiceOp) UpdateFirewallRules(ctx context.Context, databaseID string, firewallRulesReq *DatabaseUpdateFirewallRulesRequest) (*Response, error) { - path := fmt.Sprintf(databaseFirewallRulesPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, firewallRulesReq) - if err != nil { - return nil, err - } - return svc.client.Do(ctx, req, nil) -} - -// GetPostgreSQLConfig retrieves the config for a PostgreSQL database cluster. -func (svc *DatabasesServiceOp) GetPostgreSQLConfig(ctx context.Context, databaseID string) (*PostgreSQLConfig, *Response, error) { - path := fmt.Sprintf(databaseConfigPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databasePostgreSQLConfigRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Config, resp, nil -} - -// UpdatePostgreSQLConfig updates the config for a PostgreSQL database cluster. -func (svc *DatabasesServiceOp) UpdatePostgreSQLConfig(ctx context.Context, databaseID string, config *PostgreSQLConfig) (*Response, error) { - path := fmt.Sprintf(databaseConfigPath, databaseID) - root := &databasePostgreSQLConfigRoot{ - Config: config, - } - req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// GetRedisConfig retrieves the config for a Redis database cluster. 
-func (svc *DatabasesServiceOp) GetRedisConfig(ctx context.Context, databaseID string) (*RedisConfig, *Response, error) { - path := fmt.Sprintf(databaseConfigPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databaseRedisConfigRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Config, resp, nil -} - -// UpdateRedisConfig updates the config for a Redis database cluster. -func (svc *DatabasesServiceOp) UpdateRedisConfig(ctx context.Context, databaseID string, config *RedisConfig) (*Response, error) { - path := fmt.Sprintf(databaseConfigPath, databaseID) - - // We provide consts for use with SetEvictionPolicy method. Unfortunately, those are - // in a different format than what can be used for RedisConfig.RedisMaxmemoryPolicy. - // So we attempt to normalize them here to use dashes as separators if provided in - // the old format (underscores). Other values are passed through untouched. - if config.RedisMaxmemoryPolicy != nil { - if policy, ok := evictionPolicyMap[*config.RedisMaxmemoryPolicy]; ok { - config.RedisMaxmemoryPolicy = &policy - } - } - - root := &databaseRedisConfigRoot{ - Config: config, - } - req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// GetMySQLConfig retrieves the config for a MySQL database cluster. 
-func (svc *DatabasesServiceOp) GetMySQLConfig(ctx context.Context, databaseID string) (*MySQLConfig, *Response, error) { - path := fmt.Sprintf(databaseConfigPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databaseMySQLConfigRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Config, resp, nil -} - -// UpdateMySQLConfig updates the config for a MySQL database cluster. -func (svc *DatabasesServiceOp) UpdateMySQLConfig(ctx context.Context, databaseID string, config *MySQLConfig) (*Response, error) { - path := fmt.Sprintf(databaseConfigPath, databaseID) - root := &databaseMySQLConfigRoot{ - Config: config, - } - req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// ListOptions gets the database options available. -func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) { - root := new(databaseOptionsRoot) - req, err := svc.client.NewRequest(ctx, http.MethodGet, databaseOptionsPath, nil) - if err != nil { - return nil, nil, err - } - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Options, resp, nil -} - -// UpgradeMajorVersion upgrades the major version of a cluster. 
-func (svc *DatabasesServiceOp) UpgradeMajorVersion(ctx context.Context, databaseID string, upgradeReq *UpgradeVersionRequest) (*Response, error) { - path := fmt.Sprintf(databaseUpgradeMajorVersionPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, upgradeReq) - if err != nil { - return nil, err - } - - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// ListTopics returns all topics for a given kafka cluster -func (svc *DatabasesServiceOp) ListTopics(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseTopic, *Response, error) { - path := fmt.Sprintf(databaseTopicsPath, databaseID) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databaseTopicsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Topics, resp, nil -} - -// GetTopic returns a single kafka topic by name -func (svc *DatabasesServiceOp) GetTopic(ctx context.Context, databaseID, name string) (*DatabaseTopic, *Response, error) { - path := fmt.Sprintf(databaseTopicPath, databaseID, name) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(databaseTopicRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Topic, resp, nil -} - -// CreateTopic will create a new kafka topic -func (svc *DatabasesServiceOp) CreateTopic(ctx context.Context, databaseID string, createTopic *DatabaseCreateTopicRequest) (*DatabaseTopic, *Response, error) { - path := fmt.Sprintf(databaseTopicsPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createTopic) - if err != nil { - return nil, nil, err - } - root := new(databaseTopicRoot) - resp, err := 
svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Topic, resp, nil -} - -// UpdateTopic updates a single kafka topic -func (svc *DatabasesServiceOp) UpdateTopic(ctx context.Context, databaseID string, name string, updateTopic *DatabaseUpdateTopicRequest) (*Response, error) { - path := fmt.Sprintf(databaseTopicPath, databaseID, name) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updateTopic) - if err != nil { - return nil, err - } - root := new(databaseTopicRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return resp, err - } - return resp, nil -} - -// DeleteTopic will delete an existing kafka topic -func (svc *DatabasesServiceOp) DeleteTopic(ctx context.Context, databaseID, name string) (*Response, error) { - path := fmt.Sprintf(databaseTopicPath, databaseID, name) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// GetMetricsCredentials gets the credentials required to access a user's metrics endpoints -func (svc *DatabasesServiceOp) GetMetricsCredentials(ctx context.Context) (*DatabaseMetricsCredentials, *Response, error) { - req, err := svc.client.NewRequest(ctx, http.MethodGet, databaseMetricsCredentialsPath, nil) - if err != nil { - return nil, nil, err - } - - root := new(databaseMetricsCredentialsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Credentials, resp, nil -} - -// UpdateMetricsAuth updates the credentials required to access a user's metrics endpoints -func (svc *DatabasesServiceOp) UpdateMetricsCredentials(ctx context.Context, updateCreds *DatabaseUpdateMetricsCredentialsRequest) (*Response, error) { - req, err := svc.client.NewRequest(ctx, http.MethodPut, databaseMetricsCredentialsPath, updateCreds) - if err != nil { - return nil, err - 
} - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// ListDatabaseEvents returns all the events for a given cluster -func (svc *DatabasesServiceOp) ListDatabaseEvents(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseEvent, *Response, error) { - path := fmt.Sprintf(databaseEvents, databaseID) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - root := new(ListDatabaseEventsRoot) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Events, resp, nil -} diff --git a/vendor/github.com/digitalocean/godo/doc.go b/vendor/github.com/digitalocean/godo/doc.go deleted file mode 100644 index 113b02a..0000000 --- a/vendor/github.com/digitalocean/godo/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package godo is the DigitalOcean API v2 client for Go. -package godo diff --git a/vendor/github.com/digitalocean/godo/domains.go b/vendor/github.com/digitalocean/godo/domains.go deleted file mode 100644 index 6a86296..0000000 --- a/vendor/github.com/digitalocean/godo/domains.go +++ /dev/null @@ -1,411 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const domainsBasePath = "v2/domains" - -// DomainsService is an interface for managing DNS with the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Domains and -// https://docs.digitalocean.com/reference/api/api-reference/#tag/Domain-Records -type DomainsService interface { - List(context.Context, *ListOptions) ([]Domain, *Response, error) - Get(context.Context, string) (*Domain, *Response, error) - Create(context.Context, *DomainCreateRequest) (*Domain, *Response, error) - Delete(context.Context, string) (*Response, error) - - Records(context.Context, string, *ListOptions) ([]DomainRecord, *Response, error) - RecordsByType(context.Context, string, string, *ListOptions) ([]DomainRecord, *Response, error) - RecordsByName(context.Context, string, string, *ListOptions) ([]DomainRecord, *Response, error) - RecordsByTypeAndName(context.Context, string, string, string, *ListOptions) ([]DomainRecord, *Response, error) - Record(context.Context, string, int) (*DomainRecord, *Response, error) - DeleteRecord(context.Context, string, int) (*Response, error) - EditRecord(context.Context, string, int, *DomainRecordEditRequest) (*DomainRecord, *Response, error) - CreateRecord(context.Context, string, *DomainRecordEditRequest) (*DomainRecord, *Response, error) -} - -// DomainsServiceOp handles communication with the domain related methods of the -// DigitalOcean API. -type DomainsServiceOp struct { - client *Client -} - -var _ DomainsService = &DomainsServiceOp{} - -// Domain represents a DigitalOcean domain -type Domain struct { - Name string `json:"name"` - TTL int `json:"ttl"` - ZoneFile string `json:"zone_file"` -} - -// domainRoot represents a response from the DigitalOcean API -type domainRoot struct { - Domain *Domain `json:"domain"` -} - -type domainsRoot struct { - Domains []Domain `json:"domains"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// DomainCreateRequest represents a request to create a domain. 
-type DomainCreateRequest struct { - Name string `json:"name"` - IPAddress string `json:"ip_address,omitempty"` -} - -// DomainRecordRoot is the root of an individual Domain Record response -type domainRecordRoot struct { - DomainRecord *DomainRecord `json:"domain_record"` -} - -// DomainRecordsRoot is the root of a group of Domain Record responses -type domainRecordsRoot struct { - DomainRecords []DomainRecord `json:"domain_records"` - Links *Links `json:"links"` -} - -// DomainRecord represents a DigitalOcean DomainRecord -type DomainRecord struct { - ID int `json:"id,omitempty"` - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - Data string `json:"data,omitempty"` - Priority int `json:"priority"` - Port int `json:"port"` - TTL int `json:"ttl,omitempty"` - Weight int `json:"weight"` - Flags int `json:"flags"` - Tag string `json:"tag,omitempty"` -} - -// DomainRecordEditRequest represents a request to update a domain record. -type DomainRecordEditRequest struct { - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - Data string `json:"data,omitempty"` - Priority int `json:"priority"` - Port int `json:"port"` - TTL int `json:"ttl,omitempty"` - Weight int `json:"weight"` - Flags int `json:"flags"` - Tag string `json:"tag,omitempty"` -} - -func (d Domain) String() string { - return Stringify(d) -} - -// URN returns the domain name in a valid DO API URN form. -func (d Domain) URN() string { - return ToURN("Domain", d.Name) -} - -// List all domains. 
-func (s DomainsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Domain, *Response, error) { - path := domainsBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(domainsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Domains, resp, err -} - -// Get individual domain. It requires a non-empty domain name. -func (s *DomainsServiceOp) Get(ctx context.Context, name string) (*Domain, *Response, error) { - if len(name) < 1 { - return nil, nil, NewArgError("name", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s", domainsBasePath, name) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(domainRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Domain, resp, err -} - -// Create a new domain -func (s *DomainsServiceOp) Create(ctx context.Context, createRequest *DomainCreateRequest) (*Domain, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - path := domainsBasePath - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(domainRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Domain, resp, err -} - -// Delete domain -func (s *DomainsServiceOp) Delete(ctx context.Context, name string) (*Response, error) { - if len(name) < 1 { - return nil, NewArgError("name", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s", domainsBasePath, name) - - req, err := 
s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// Converts a DomainRecord to a string. -func (d DomainRecord) String() string { - return Stringify(d) -} - -// Converts a DomainRecordEditRequest to a string. -func (d DomainRecordEditRequest) String() string { - return Stringify(d) -} - -// Records returns a slice of DomainRecord for a domain. -func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *ListOptions) ([]DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.records(ctx, path) -} - -// RecordsByType returns a slice of DomainRecord for a domain matched by record type. -func (s *DomainsServiceOp) RecordsByType(ctx context.Context, domain, ofType string, opt *ListOptions) ([]DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be an empty string") - } - - if len(ofType) < 1 { - return nil, nil, NewArgError("type", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s/records?type=%s", domainsBasePath, domain, ofType) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.records(ctx, path) -} - -// RecordsByName returns a slice of DomainRecord for a domain matched by record name. 
-func (s *DomainsServiceOp) RecordsByName(ctx context.Context, domain, name string, opt *ListOptions) ([]DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be an empty string") - } - - if len(name) < 1 { - return nil, nil, NewArgError("name", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s/records?name=%s", domainsBasePath, domain, name) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.records(ctx, path) -} - -// RecordsByTypeAndName returns a slice of DomainRecord for a domain matched by record type and name. -func (s *DomainsServiceOp) RecordsByTypeAndName(ctx context.Context, domain, ofType, name string, opt *ListOptions) ([]DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be an empty string") - } - - if len(ofType) < 1 { - return nil, nil, NewArgError("type", "cannot be an empty string") - } - - if len(name) < 1 { - return nil, nil, NewArgError("name", "cannot be an empty string") - } - - path := fmt.Sprintf("%s/%s/records?type=%s&name=%s", domainsBasePath, domain, ofType, name) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.records(ctx, path) -} - -// Record returns the record id from a domain -func (s *DomainsServiceOp) Record(ctx context.Context, domain string, id int) (*DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be an empty string") - } - - if id < 1 { - return nil, nil, NewArgError("id", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - record := new(domainRecordRoot) - resp, err := s.client.Do(ctx, req, record) - if err != nil { - return nil, resp, err - } - - return record.DomainRecord, resp, err -} - -// 
DeleteRecord deletes a record from a domain identified by id -func (s *DomainsServiceOp) DeleteRecord(ctx context.Context, domain string, id int) (*Response, error) { - if len(domain) < 1 { - return nil, NewArgError("domain", "cannot be an empty string") - } - - if id < 1 { - return nil, NewArgError("id", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) - - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// EditRecord edits a record using a DomainRecordEditRequest -func (s *DomainsServiceOp) EditRecord(ctx context.Context, - domain string, - id int, - editRequest *DomainRecordEditRequest, -) (*DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be an empty string") - } - - if id < 1 { - return nil, nil, NewArgError("id", "cannot be less than 1") - } - - if editRequest == nil { - return nil, nil, NewArgError("editRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) - - req, err := s.client.NewRequest(ctx, http.MethodPut, path, editRequest) - if err != nil { - return nil, nil, err - } - - root := new(domainRecordRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.DomainRecord, resp, err -} - -// CreateRecord creates a record using a DomainRecordEditRequest -func (s *DomainsServiceOp) CreateRecord(ctx context.Context, - domain string, - createRequest *DomainRecordEditRequest) (*DomainRecord, *Response, error) { - if len(domain) < 1 { - return nil, nil, NewArgError("domain", "cannot be empty string") - } - - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, 
createRequest) - - if err != nil { - return nil, nil, err - } - - d := new(domainRecordRoot) - resp, err := s.client.Do(ctx, req, d) - if err != nil { - return nil, resp, err - } - - return d.DomainRecord, resp, err -} - -// Performs a domain records request given a path. -func (s *DomainsServiceOp) records(ctx context.Context, path string) ([]DomainRecord, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(domainRecordsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.DomainRecords, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/droplet_actions.go b/vendor/github.com/digitalocean/godo/droplet_actions.go deleted file mode 100644 index 2e09d0c..0000000 --- a/vendor/github.com/digitalocean/godo/droplet_actions.go +++ /dev/null @@ -1,329 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "net/url" -) - -// ActionRequest represents DigitalOcean Action Request -type ActionRequest map[string]interface{} - -// DropletActionsService is an interface for interfacing with the Droplet actions -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Droplet-Actions -type DropletActionsService interface { - Shutdown(context.Context, int) (*Action, *Response, error) - ShutdownByTag(context.Context, string) ([]Action, *Response, error) - PowerOff(context.Context, int) (*Action, *Response, error) - PowerOffByTag(context.Context, string) ([]Action, *Response, error) - PowerOn(context.Context, int) (*Action, *Response, error) - PowerOnByTag(context.Context, string) ([]Action, *Response, error) - PowerCycle(context.Context, int) (*Action, *Response, error) - PowerCycleByTag(context.Context, string) ([]Action, *Response, error) - Reboot(context.Context, int) (*Action, *Response, error) - 
Restore(context.Context, int, int) (*Action, *Response, error) - Resize(context.Context, int, string, bool) (*Action, *Response, error) - Rename(context.Context, int, string) (*Action, *Response, error) - Snapshot(context.Context, int, string) (*Action, *Response, error) - SnapshotByTag(context.Context, string, string) ([]Action, *Response, error) - EnableBackups(context.Context, int) (*Action, *Response, error) - EnableBackupsByTag(context.Context, string) ([]Action, *Response, error) - DisableBackups(context.Context, int) (*Action, *Response, error) - DisableBackupsByTag(context.Context, string) ([]Action, *Response, error) - PasswordReset(context.Context, int) (*Action, *Response, error) - RebuildByImageID(context.Context, int, int) (*Action, *Response, error) - RebuildByImageSlug(context.Context, int, string) (*Action, *Response, error) - ChangeKernel(context.Context, int, int) (*Action, *Response, error) - EnableIPv6(context.Context, int) (*Action, *Response, error) - EnableIPv6ByTag(context.Context, string) ([]Action, *Response, error) - EnablePrivateNetworking(context.Context, int) (*Action, *Response, error) - EnablePrivateNetworkingByTag(context.Context, string) ([]Action, *Response, error) - Get(context.Context, int, int) (*Action, *Response, error) - GetByURI(context.Context, string) (*Action, *Response, error) -} - -// DropletActionsServiceOp handles communication with the Droplet action related -// methods of the DigitalOcean API. -type DropletActionsServiceOp struct { - client *Client -} - -var _ DropletActionsService = &DropletActionsServiceOp{} - -// Shutdown a Droplet -func (s *DropletActionsServiceOp) Shutdown(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "shutdown"} - return s.doAction(ctx, id, request) -} - -// ShutdownByTag shuts down Droplets matched by a Tag. 
-func (s *DropletActionsServiceOp) ShutdownByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "shutdown"} - return s.doActionByTag(ctx, tag, request) -} - -// PowerOff a Droplet -func (s *DropletActionsServiceOp) PowerOff(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "power_off"} - return s.doAction(ctx, id, request) -} - -// PowerOffByTag powers off Droplets matched by a Tag. -func (s *DropletActionsServiceOp) PowerOffByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "power_off"} - return s.doActionByTag(ctx, tag, request) -} - -// PowerOn a Droplet -func (s *DropletActionsServiceOp) PowerOn(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "power_on"} - return s.doAction(ctx, id, request) -} - -// PowerOnByTag powers on Droplets matched by a Tag. -func (s *DropletActionsServiceOp) PowerOnByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "power_on"} - return s.doActionByTag(ctx, tag, request) -} - -// PowerCycle a Droplet -func (s *DropletActionsServiceOp) PowerCycle(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "power_cycle"} - return s.doAction(ctx, id, request) -} - -// PowerCycleByTag power cycles Droplets matched by a Tag. 
-func (s *DropletActionsServiceOp) PowerCycleByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "power_cycle"} - return s.doActionByTag(ctx, tag, request) -} - -// Reboot a Droplet -func (s *DropletActionsServiceOp) Reboot(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "reboot"} - return s.doAction(ctx, id, request) -} - -// Restore an image to a Droplet -func (s *DropletActionsServiceOp) Restore(ctx context.Context, id, imageID int) (*Action, *Response, error) { - requestType := "restore" - request := &ActionRequest{ - "type": requestType, - "image": float64(imageID), - } - return s.doAction(ctx, id, request) -} - -// Resize a Droplet -func (s *DropletActionsServiceOp) Resize(ctx context.Context, id int, sizeSlug string, resizeDisk bool) (*Action, *Response, error) { - requestType := "resize" - request := &ActionRequest{ - "type": requestType, - "size": sizeSlug, - "disk": resizeDisk, - } - return s.doAction(ctx, id, request) -} - -// Rename a Droplet -func (s *DropletActionsServiceOp) Rename(ctx context.Context, id int, name string) (*Action, *Response, error) { - requestType := "rename" - request := &ActionRequest{ - "type": requestType, - "name": name, - } - return s.doAction(ctx, id, request) -} - -// Snapshot a Droplet. -func (s *DropletActionsServiceOp) Snapshot(ctx context.Context, id int, name string) (*Action, *Response, error) { - requestType := "snapshot" - request := &ActionRequest{ - "type": requestType, - "name": name, - } - return s.doAction(ctx, id, request) -} - -// SnapshotByTag snapshots Droplets matched by a Tag. -func (s *DropletActionsServiceOp) SnapshotByTag(ctx context.Context, tag string, name string) ([]Action, *Response, error) { - requestType := "snapshot" - request := &ActionRequest{ - "type": requestType, - "name": name, - } - return s.doActionByTag(ctx, tag, request) -} - -// EnableBackups enables backups for a Droplet. 
-func (s *DropletActionsServiceOp) EnableBackups(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "enable_backups"} - return s.doAction(ctx, id, request) -} - -// EnableBackupsByTag enables backups for Droplets matched by a Tag. -func (s *DropletActionsServiceOp) EnableBackupsByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "enable_backups"} - return s.doActionByTag(ctx, tag, request) -} - -// DisableBackups disables backups for a Droplet. -func (s *DropletActionsServiceOp) DisableBackups(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "disable_backups"} - return s.doAction(ctx, id, request) -} - -// DisableBackupsByTag disables backups for Droplet matched by a Tag. -func (s *DropletActionsServiceOp) DisableBackupsByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "disable_backups"} - return s.doActionByTag(ctx, tag, request) -} - -// PasswordReset resets the password for a Droplet. -func (s *DropletActionsServiceOp) PasswordReset(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "password_reset"} - return s.doAction(ctx, id, request) -} - -// RebuildByImageID rebuilds a Droplet from an image with a given id. -func (s *DropletActionsServiceOp) RebuildByImageID(ctx context.Context, id, imageID int) (*Action, *Response, error) { - request := &ActionRequest{"type": "rebuild", "image": imageID} - return s.doAction(ctx, id, request) -} - -// RebuildByImageSlug rebuilds a Droplet from an Image matched by a given Slug. -func (s *DropletActionsServiceOp) RebuildByImageSlug(ctx context.Context, id int, slug string) (*Action, *Response, error) { - request := &ActionRequest{"type": "rebuild", "image": slug} - return s.doAction(ctx, id, request) -} - -// ChangeKernel changes the kernel for a Droplet. 
-func (s *DropletActionsServiceOp) ChangeKernel(ctx context.Context, id, kernelID int) (*Action, *Response, error) { - request := &ActionRequest{"type": "change_kernel", "kernel": kernelID} - return s.doAction(ctx, id, request) -} - -// EnableIPv6 enables IPv6 for a Droplet. -func (s *DropletActionsServiceOp) EnableIPv6(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "enable_ipv6"} - return s.doAction(ctx, id, request) -} - -// EnableIPv6ByTag enables IPv6 for Droplets matched by a Tag. -func (s *DropletActionsServiceOp) EnableIPv6ByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "enable_ipv6"} - return s.doActionByTag(ctx, tag, request) -} - -// EnablePrivateNetworking enables private networking for a Droplet. -func (s *DropletActionsServiceOp) EnablePrivateNetworking(ctx context.Context, id int) (*Action, *Response, error) { - request := &ActionRequest{"type": "enable_private_networking"} - return s.doAction(ctx, id, request) -} - -// EnablePrivateNetworkingByTag enables private networking for Droplets matched by a Tag. 
-func (s *DropletActionsServiceOp) EnablePrivateNetworkingByTag(ctx context.Context, tag string) ([]Action, *Response, error) { - request := &ActionRequest{"type": "enable_private_networking"} - return s.doActionByTag(ctx, tag, request) -} - -func (s *DropletActionsServiceOp) doAction(ctx context.Context, id int, request *ActionRequest) (*Action, *Response, error) { - if id < 1 { - return nil, nil, NewArgError("id", "cannot be less than 1") - } - - if request == nil { - return nil, nil, NewArgError("request", "request can't be nil") - } - - path := dropletActionPath(id) - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (s *DropletActionsServiceOp) doActionByTag(ctx context.Context, tag string, request *ActionRequest) ([]Action, *Response, error) { - if tag == "" { - return nil, nil, NewArgError("tag", "cannot be empty") - } - - if request == nil { - return nil, nil, NewArgError("request", "request can't be nil") - } - - path := dropletActionPathByTag(tag) - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, nil, err - } - - root := new(actionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Actions, resp, err -} - -// Get an action for a particular Droplet by id. -func (s *DropletActionsServiceOp) Get(ctx context.Context, dropletID, actionID int) (*Action, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - if actionID < 1 { - return nil, nil, NewArgError("actionID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", dropletActionPath(dropletID), actionID) - return s.get(ctx, path) -} - -// GetByURI gets an action for a particular Droplet by URI. 
-func (s *DropletActionsServiceOp) GetByURI(ctx context.Context, rawurl string) (*Action, *Response, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, nil, err - } - - return s.get(ctx, u.Path) - -} - -func (s *DropletActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err - -} - -func dropletActionPath(dropletID int) string { - return fmt.Sprintf("v2/droplets/%d/actions", dropletID) -} - -func dropletActionPathByTag(tag string) string { - return fmt.Sprintf("v2/droplets/actions?tag_name=%s", tag) -} diff --git a/vendor/github.com/digitalocean/godo/droplets.go b/vendor/github.com/digitalocean/godo/droplets.go deleted file mode 100644 index 5f19863..0000000 --- a/vendor/github.com/digitalocean/godo/droplets.go +++ /dev/null @@ -1,608 +0,0 @@ -package godo - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" -) - -const dropletBasePath = "v2/droplets" - -var errNoNetworks = errors.New("no networks have been defined") - -// DropletsService is an interface for interfacing with the Droplet -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Droplets -type DropletsService interface { - List(context.Context, *ListOptions) ([]Droplet, *Response, error) - ListByName(context.Context, string, *ListOptions) ([]Droplet, *Response, error) - ListByTag(context.Context, string, *ListOptions) ([]Droplet, *Response, error) - Get(context.Context, int) (*Droplet, *Response, error) - Create(context.Context, *DropletCreateRequest) (*Droplet, *Response, error) - CreateMultiple(context.Context, *DropletMultiCreateRequest) ([]Droplet, *Response, error) - Delete(context.Context, int) (*Response, error) - 
DeleteByTag(context.Context, string) (*Response, error) - Kernels(context.Context, int, *ListOptions) ([]Kernel, *Response, error) - Snapshots(context.Context, int, *ListOptions) ([]Image, *Response, error) - Backups(context.Context, int, *ListOptions) ([]Image, *Response, error) - Actions(context.Context, int, *ListOptions) ([]Action, *Response, error) - Neighbors(context.Context, int) ([]Droplet, *Response, error) -} - -// DropletsServiceOp handles communication with the Droplet related methods of the -// DigitalOcean API. -type DropletsServiceOp struct { - client *Client -} - -var _ DropletsService = &DropletsServiceOp{} - -// Droplet represents a DigitalOcean Droplet -type Droplet struct { - ID int `json:"id,float64,omitempty"` - Name string `json:"name,omitempty"` - Memory int `json:"memory,omitempty"` - Vcpus int `json:"vcpus,omitempty"` - Disk int `json:"disk,omitempty"` - Region *Region `json:"region,omitempty"` - Image *Image `json:"image,omitempty"` - Size *Size `json:"size,omitempty"` - SizeSlug string `json:"size_slug,omitempty"` - BackupIDs []int `json:"backup_ids,omitempty"` - NextBackupWindow *BackupWindow `json:"next_backup_window,omitempty"` - SnapshotIDs []int `json:"snapshot_ids,omitempty"` - Features []string `json:"features,omitempty"` - Locked bool `json:"locked,bool,omitempty"` - Status string `json:"status,omitempty"` - Networks *Networks `json:"networks,omitempty"` - Created string `json:"created_at,omitempty"` - Kernel *Kernel `json:"kernel,omitempty"` - Tags []string `json:"tags,omitempty"` - VolumeIDs []string `json:"volume_ids"` - VPCUUID string `json:"vpc_uuid,omitempty"` -} - -// PublicIPv4 returns the public IPv4 address for the Droplet. -func (d *Droplet) PublicIPv4() (string, error) { - if d.Networks == nil { - return "", errNoNetworks - } - - for _, v4 := range d.Networks.V4 { - if v4.Type == "public" { - return v4.IPAddress, nil - } - } - - return "", nil -} - -// PrivateIPv4 returns the private IPv4 address for the Droplet. 
-func (d *Droplet) PrivateIPv4() (string, error) { - if d.Networks == nil { - return "", errNoNetworks - } - - for _, v4 := range d.Networks.V4 { - if v4.Type == "private" { - return v4.IPAddress, nil - } - } - - return "", nil -} - -// PublicIPv6 returns the public IPv6 address for the Droplet. -func (d *Droplet) PublicIPv6() (string, error) { - if d.Networks == nil { - return "", errNoNetworks - } - - for _, v6 := range d.Networks.V6 { - if v6.Type == "public" { - return v6.IPAddress, nil - } - } - - return "", nil -} - -// Kernel object -type Kernel struct { - ID int `json:"id,float64,omitempty"` - Name string `json:"name,omitempty"` - Version string `json:"version,omitempty"` -} - -// BackupWindow object -type BackupWindow struct { - Start *Timestamp `json:"start,omitempty"` - End *Timestamp `json:"end,omitempty"` -} - -// Convert Droplet to a string -func (d Droplet) String() string { - return Stringify(d) -} - -// URN returns the droplet ID in a valid DO API URN form. -func (d Droplet) URN() string { - return ToURN("Droplet", d.ID) -} - -// DropletRoot represents a Droplet root -type dropletRoot struct { - Droplet *Droplet `json:"droplet"` - Links *Links `json:"links,omitempty"` -} - -type dropletsRoot struct { - Droplets []Droplet `json:"droplets"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type kernelsRoot struct { - Kernels []Kernel `json:"kernels,omitempty"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type dropletSnapshotsRoot struct { - Snapshots []Image `json:"snapshots,omitempty"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type backupsRoot struct { - Backups []Image `json:"backups,omitempty"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// DropletCreateImage identifies an image for the create request. It prefers slug over ID. -type DropletCreateImage struct { - ID int - Slug string -} - -// MarshalJSON returns either the slug or id of the image. 
It returns the id -// if the slug is empty. -func (d DropletCreateImage) MarshalJSON() ([]byte, error) { - if d.Slug != "" { - return json.Marshal(d.Slug) - } - - return json.Marshal(d.ID) -} - -// DropletCreateVolume identifies a volume to attach for the create request. -type DropletCreateVolume struct { - ID string - // Deprecated: You must pass the volume's ID when creating a Droplet. - Name string -} - -// MarshalJSON returns an object with either the ID or name of the volume. It -// prefers the ID over the name. -func (d DropletCreateVolume) MarshalJSON() ([]byte, error) { - if d.ID != "" { - return json.Marshal(struct { - ID string `json:"id"` - }{ID: d.ID}) - } - - return json.Marshal(struct { - Name string `json:"name"` - }{Name: d.Name}) -} - -// DropletCreateSSHKey identifies a SSH Key for the create request. It prefers fingerprint over ID. -type DropletCreateSSHKey struct { - ID int - Fingerprint string -} - -// MarshalJSON returns either the fingerprint or id of the ssh key. It returns -// the id if the fingerprint is empty. -func (d DropletCreateSSHKey) MarshalJSON() ([]byte, error) { - if d.Fingerprint != "" { - return json.Marshal(d.Fingerprint) - } - - return json.Marshal(d.ID) -} - -// DropletCreateRequest represents a request to create a Droplet. -type DropletCreateRequest struct { - Name string `json:"name"` - Region string `json:"region"` - Size string `json:"size"` - Image DropletCreateImage `json:"image"` - SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` - Backups bool `json:"backups"` - IPv6 bool `json:"ipv6"` - PrivateNetworking bool `json:"private_networking"` - Monitoring bool `json:"monitoring"` - UserData string `json:"user_data,omitempty"` - Volumes []DropletCreateVolume `json:"volumes,omitempty"` - Tags []string `json:"tags"` - VPCUUID string `json:"vpc_uuid,omitempty"` - WithDropletAgent *bool `json:"with_droplet_agent,omitempty"` -} - -// DropletMultiCreateRequest is a request to create multiple Droplets. 
-type DropletMultiCreateRequest struct { - Names []string `json:"names"` - Region string `json:"region"` - Size string `json:"size"` - Image DropletCreateImage `json:"image"` - SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` - Backups bool `json:"backups"` - IPv6 bool `json:"ipv6"` - PrivateNetworking bool `json:"private_networking"` - Monitoring bool `json:"monitoring"` - UserData string `json:"user_data,omitempty"` - Tags []string `json:"tags"` - VPCUUID string `json:"vpc_uuid,omitempty"` - WithDropletAgent *bool `json:"with_droplet_agent,omitempty"` -} - -func (d DropletCreateRequest) String() string { - return Stringify(d) -} - -func (d DropletMultiCreateRequest) String() string { - return Stringify(d) -} - -// Networks represents the Droplet's Networks. -type Networks struct { - V4 []NetworkV4 `json:"v4,omitempty"` - V6 []NetworkV6 `json:"v6,omitempty"` -} - -// NetworkV4 represents a DigitalOcean IPv4 Network. -type NetworkV4 struct { - IPAddress string `json:"ip_address,omitempty"` - Netmask string `json:"netmask,omitempty"` - Gateway string `json:"gateway,omitempty"` - Type string `json:"type,omitempty"` -} - -func (n NetworkV4) String() string { - return Stringify(n) -} - -// NetworkV6 represents a DigitalOcean IPv6 network. -type NetworkV6 struct { - IPAddress string `json:"ip_address,omitempty"` - Netmask int `json:"netmask,omitempty"` - Gateway string `json:"gateway,omitempty"` - Type string `json:"type,omitempty"` -} - -func (n NetworkV6) String() string { - return Stringify(n) -} - -// Performs a list request given a path. 
-func (s *DropletsServiceOp) list(ctx context.Context, path string) ([]Droplet, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(dropletsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Droplets, resp, err -} - -// List all Droplets. -func (s *DropletsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Droplet, *Response, error) { - path := dropletBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.list(ctx, path) -} - -// ListByName lists all Droplets filtered by name returning only exact matches. -// It is case-insensitive -func (s *DropletsServiceOp) ListByName(ctx context.Context, name string, opt *ListOptions) ([]Droplet, *Response, error) { - path := fmt.Sprintf("%s?name=%s", dropletBasePath, name) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.list(ctx, path) -} - -// ListByTag lists all Droplets matched by a Tag. -func (s *DropletsServiceOp) ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Droplet, *Response, error) { - path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.list(ctx, path) -} - -// Get individual Droplet. 
-func (s *DropletsServiceOp) Get(ctx context.Context, dropletID int) (*Droplet, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(dropletRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Droplet, resp, err -} - -// Create Droplet -func (s *DropletsServiceOp) Create(ctx context.Context, createRequest *DropletCreateRequest) (*Droplet, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - path := dropletBasePath - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(dropletRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.Droplet, resp, err -} - -// CreateMultiple creates multiple Droplets. 
-func (s *DropletsServiceOp) CreateMultiple(ctx context.Context, createRequest *DropletMultiCreateRequest) ([]Droplet, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - path := dropletBasePath - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(dropletsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.Droplets, resp, err -} - -// Performs a delete request given a path -func (s *DropletsServiceOp) delete(ctx context.Context, path string) (*Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// Delete Droplet. -func (s *DropletsServiceOp) Delete(ctx context.Context, dropletID int) (*Response, error) { - if dropletID < 1 { - return nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) - - return s.delete(ctx, path) -} - -// DeleteByTag deletes Droplets matched by a Tag. -func (s *DropletsServiceOp) DeleteByTag(ctx context.Context, tag string) (*Response, error) { - if tag == "" { - return nil, NewArgError("tag", "cannot be empty") - } - - path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag) - - return s.delete(ctx, path) -} - -// Kernels lists kernels available for a Droplet. 
-func (s *DropletsServiceOp) Kernels(ctx context.Context, dropletID int, opt *ListOptions) ([]Kernel, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d/kernels", dropletBasePath, dropletID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(kernelsRoot) - resp, err := s.client.Do(ctx, req, root) - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Kernels, resp, err -} - -// Actions lists the actions for a Droplet. -func (s *DropletsServiceOp) Actions(ctx context.Context, dropletID int, opt *ListOptions) ([]Action, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d/actions", dropletBasePath, dropletID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Actions, resp, err -} - -// Backups lists the backups for a Droplet. 
-func (s *DropletsServiceOp) Backups(ctx context.Context, dropletID int, opt *ListOptions) ([]Image, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d/backups", dropletBasePath, dropletID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(backupsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Backups, resp, err -} - -// Snapshots lists the snapshots available for a Droplet. -func (s *DropletsServiceOp) Snapshots(ctx context.Context, dropletID int, opt *ListOptions) ([]Image, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d/snapshots", dropletBasePath, dropletID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(dropletSnapshotsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Snapshots, resp, err -} - -// Neighbors lists the neighbors for a Droplet. 
-func (s *DropletsServiceOp) Neighbors(ctx context.Context, dropletID int) ([]Droplet, *Response, error) { - if dropletID < 1 { - return nil, nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d/neighbors", dropletBasePath, dropletID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(dropletsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Droplets, resp, err -} - -func (s *DropletsServiceOp) dropletActionStatus(ctx context.Context, uri string) (string, error) { - action, _, err := s.client.DropletActions.GetByURI(ctx, uri) - - if err != nil { - return "", err - } - - return action.Status, nil -} diff --git a/vendor/github.com/digitalocean/godo/errors.go b/vendor/github.com/digitalocean/godo/errors.go deleted file mode 100644 index a65ebd7..0000000 --- a/vendor/github.com/digitalocean/godo/errors.go +++ /dev/null @@ -1,24 +0,0 @@ -package godo - -import "fmt" - -// ArgError is an error that represents an error with an input to godo. It -// identifies the argument and the cause (if possible). -type ArgError struct { - arg string - reason string -} - -var _ error = &ArgError{} - -// NewArgError creates an InputError. -func NewArgError(arg, reason string) *ArgError { - return &ArgError{ - arg: arg, - reason: reason, - } -} - -func (e *ArgError) Error() string { - return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason) -} diff --git a/vendor/github.com/digitalocean/godo/firewalls.go b/vendor/github.com/digitalocean/godo/firewalls.go deleted file mode 100644 index d2aadb4..0000000 --- a/vendor/github.com/digitalocean/godo/firewalls.go +++ /dev/null @@ -1,274 +0,0 @@ -package godo - -import ( - "context" - "net/http" - "path" - "strconv" -) - -const firewallsBasePath = "/v2/firewalls" - -// FirewallsService is an interface for managing Firewalls with the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Firewalls -type FirewallsService interface { - Get(context.Context, string) (*Firewall, *Response, error) - Create(context.Context, *FirewallRequest) (*Firewall, *Response, error) - Update(context.Context, string, *FirewallRequest) (*Firewall, *Response, error) - Delete(context.Context, string) (*Response, error) - List(context.Context, *ListOptions) ([]Firewall, *Response, error) - ListByDroplet(context.Context, int, *ListOptions) ([]Firewall, *Response, error) - AddDroplets(context.Context, string, ...int) (*Response, error) - RemoveDroplets(context.Context, string, ...int) (*Response, error) - AddTags(context.Context, string, ...string) (*Response, error) - RemoveTags(context.Context, string, ...string) (*Response, error) - AddRules(context.Context, string, *FirewallRulesRequest) (*Response, error) - RemoveRules(context.Context, string, *FirewallRulesRequest) (*Response, error) -} - -// FirewallsServiceOp handles communication with Firewalls methods of the DigitalOcean API. -type FirewallsServiceOp struct { - client *Client -} - -// Firewall represents a DigitalOcean Firewall configuration. -type Firewall struct { - ID string `json:"id"` - Name string `json:"name"` - Status string `json:"status"` - InboundRules []InboundRule `json:"inbound_rules"` - OutboundRules []OutboundRule `json:"outbound_rules"` - DropletIDs []int `json:"droplet_ids"` - Tags []string `json:"tags"` - Created string `json:"created_at"` - PendingChanges []PendingChange `json:"pending_changes"` -} - -// String creates a human-readable description of a Firewall. -func (fw Firewall) String() string { - return Stringify(fw) -} - -// URN returns the firewall name in a valid DO API URN form. -func (fw Firewall) URN() string { - return ToURN("Firewall", fw.ID) -} - -// FirewallRequest represents the configuration to be applied to an existing or a new Firewall. 
-type FirewallRequest struct { - Name string `json:"name"` - InboundRules []InboundRule `json:"inbound_rules"` - OutboundRules []OutboundRule `json:"outbound_rules"` - DropletIDs []int `json:"droplet_ids"` - Tags []string `json:"tags"` -} - -// FirewallRulesRequest represents rules configuration to be applied to an existing Firewall. -type FirewallRulesRequest struct { - InboundRules []InboundRule `json:"inbound_rules"` - OutboundRules []OutboundRule `json:"outbound_rules"` -} - -// InboundRule represents a DigitalOcean Firewall inbound rule. -type InboundRule struct { - Protocol string `json:"protocol,omitempty"` - PortRange string `json:"ports,omitempty"` - Sources *Sources `json:"sources"` -} - -// OutboundRule represents a DigitalOcean Firewall outbound rule. -type OutboundRule struct { - Protocol string `json:"protocol,omitempty"` - PortRange string `json:"ports,omitempty"` - Destinations *Destinations `json:"destinations"` -} - -// Sources represents a DigitalOcean Firewall InboundRule sources. -type Sources struct { - Addresses []string `json:"addresses,omitempty"` - Tags []string `json:"tags,omitempty"` - DropletIDs []int `json:"droplet_ids,omitempty"` - LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"` - KubernetesIDs []string `json:"kubernetes_ids,omitempty"` -} - -// PendingChange represents a DigitalOcean Firewall status details. -type PendingChange struct { - DropletID int `json:"droplet_id,omitempty"` - Removing bool `json:"removing,omitempty"` - Status string `json:"status,omitempty"` -} - -// Destinations represents a DigitalOcean Firewall OutboundRule destinations. 
-type Destinations struct { - Addresses []string `json:"addresses,omitempty"` - Tags []string `json:"tags,omitempty"` - DropletIDs []int `json:"droplet_ids,omitempty"` - LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"` - KubernetesIDs []string `json:"kubernetes_ids,omitempty"` -} - -var _ FirewallsService = &FirewallsServiceOp{} - -// Get an existing Firewall by its identifier. -func (fw *FirewallsServiceOp) Get(ctx context.Context, fID string) (*Firewall, *Response, error) { - path := path.Join(firewallsBasePath, fID) - - req, err := fw.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(firewallRoot) - resp, err := fw.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Firewall, resp, err -} - -// Create a new Firewall with a given configuration. -func (fw *FirewallsServiceOp) Create(ctx context.Context, fr *FirewallRequest) (*Firewall, *Response, error) { - req, err := fw.client.NewRequest(ctx, http.MethodPost, firewallsBasePath, fr) - if err != nil { - return nil, nil, err - } - - root := new(firewallRoot) - resp, err := fw.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Firewall, resp, err -} - -// Update an existing Firewall with new configuration. -func (fw *FirewallsServiceOp) Update(ctx context.Context, fID string, fr *FirewallRequest) (*Firewall, *Response, error) { - path := path.Join(firewallsBasePath, fID) - - req, err := fw.client.NewRequest(ctx, "PUT", path, fr) - if err != nil { - return nil, nil, err - } - - root := new(firewallRoot) - resp, err := fw.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Firewall, resp, err -} - -// Delete a Firewall by its identifier. 
-func (fw *FirewallsServiceOp) Delete(ctx context.Context, fID string) (*Response, error) { - path := path.Join(firewallsBasePath, fID) - return fw.createAndDoReq(ctx, http.MethodDelete, path, nil) -} - -// List Firewalls. -func (fw *FirewallsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Firewall, *Response, error) { - path, err := addOptions(firewallsBasePath, opt) - if err != nil { - return nil, nil, err - } - - return fw.listHelper(ctx, path) -} - -// ListByDroplet Firewalls. -func (fw *FirewallsServiceOp) ListByDroplet(ctx context.Context, dID int, opt *ListOptions) ([]Firewall, *Response, error) { - basePath := path.Join(dropletBasePath, strconv.Itoa(dID), "firewalls") - path, err := addOptions(basePath, opt) - if err != nil { - return nil, nil, err - } - - return fw.listHelper(ctx, path) -} - -// AddDroplets to a Firewall. -func (fw *FirewallsServiceOp) AddDroplets(ctx context.Context, fID string, dropletIDs ...int) (*Response, error) { - path := path.Join(firewallsBasePath, fID, "droplets") - return fw.createAndDoReq(ctx, http.MethodPost, path, &dropletsRequest{IDs: dropletIDs}) -} - -// RemoveDroplets from a Firewall. -func (fw *FirewallsServiceOp) RemoveDroplets(ctx context.Context, fID string, dropletIDs ...int) (*Response, error) { - path := path.Join(firewallsBasePath, fID, "droplets") - return fw.createAndDoReq(ctx, http.MethodDelete, path, &dropletsRequest{IDs: dropletIDs}) -} - -// AddTags to a Firewall. -func (fw *FirewallsServiceOp) AddTags(ctx context.Context, fID string, tags ...string) (*Response, error) { - path := path.Join(firewallsBasePath, fID, "tags") - return fw.createAndDoReq(ctx, http.MethodPost, path, &tagsRequest{Tags: tags}) -} - -// RemoveTags from a Firewall. 
-func (fw *FirewallsServiceOp) RemoveTags(ctx context.Context, fID string, tags ...string) (*Response, error) { - path := path.Join(firewallsBasePath, fID, "tags") - return fw.createAndDoReq(ctx, http.MethodDelete, path, &tagsRequest{Tags: tags}) -} - -// AddRules to a Firewall. -func (fw *FirewallsServiceOp) AddRules(ctx context.Context, fID string, rr *FirewallRulesRequest) (*Response, error) { - path := path.Join(firewallsBasePath, fID, "rules") - return fw.createAndDoReq(ctx, http.MethodPost, path, rr) -} - -// RemoveRules from a Firewall. -func (fw *FirewallsServiceOp) RemoveRules(ctx context.Context, fID string, rr *FirewallRulesRequest) (*Response, error) { - path := path.Join(firewallsBasePath, fID, "rules") - return fw.createAndDoReq(ctx, http.MethodDelete, path, rr) -} - -type dropletsRequest struct { - IDs []int `json:"droplet_ids"` -} - -type tagsRequest struct { - Tags []string `json:"tags"` -} - -type firewallRoot struct { - Firewall *Firewall `json:"firewall"` -} - -type firewallsRoot struct { - Firewalls []Firewall `json:"firewalls"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -func (fw *FirewallsServiceOp) createAndDoReq(ctx context.Context, method, path string, v interface{}) (*Response, error) { - req, err := fw.client.NewRequest(ctx, method, path, v) - if err != nil { - return nil, err - } - - return fw.client.Do(ctx, req, nil) -} - -func (fw *FirewallsServiceOp) listHelper(ctx context.Context, path string) ([]Firewall, *Response, error) { - req, err := fw.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(firewallsRoot) - resp, err := fw.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Firewalls, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/floating_ips.go b/vendor/github.com/digitalocean/godo/floating_ips.go 
deleted file mode 100644 index 5a29c67..0000000 --- a/vendor/github.com/digitalocean/godo/floating_ips.go +++ /dev/null @@ -1,147 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const floatingBasePath = "v2/floating_ips" - -// FloatingIPsService is an interface for interfacing with the floating IPs -// endpoints of the Digital Ocean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs -type FloatingIPsService interface { - List(context.Context, *ListOptions) ([]FloatingIP, *Response, error) - Get(context.Context, string) (*FloatingIP, *Response, error) - Create(context.Context, *FloatingIPCreateRequest) (*FloatingIP, *Response, error) - Delete(context.Context, string) (*Response, error) -} - -// FloatingIPsServiceOp handles communication with the floating IPs related methods of the -// DigitalOcean API. -type FloatingIPsServiceOp struct { - client *Client -} - -var _ FloatingIPsService = &FloatingIPsServiceOp{} - -// FloatingIP represents a Digital Ocean floating IP. -type FloatingIP struct { - Region *Region `json:"region"` - Droplet *Droplet `json:"droplet"` - IP string `json:"ip"` - ProjectID string `json:"project_id"` - Locked bool `json:"locked"` -} - -func (f FloatingIP) String() string { - return Stringify(f) -} - -// URN returns the floating IP in a valid DO API URN form. -func (f FloatingIP) URN() string { - return ToURN("FloatingIP", f.IP) -} - -type floatingIPsRoot struct { - FloatingIPs []FloatingIP `json:"floating_ips"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type floatingIPRoot struct { - FloatingIP *FloatingIP `json:"floating_ip"` - Links *Links `json:"links,omitempty"` -} - -// FloatingIPCreateRequest represents a request to create a floating IP. -// Specify DropletID to assign the floating IP to a Droplet or Region -// to reserve it to the region. 
-type FloatingIPCreateRequest struct { - Region string `json:"region,omitempty"` - DropletID int `json:"droplet_id,omitempty"` - ProjectID string `json:"project_id,omitempty"` -} - -// List all floating IPs. -func (f *FloatingIPsServiceOp) List(ctx context.Context, opt *ListOptions) ([]FloatingIP, *Response, error) { - path := floatingBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := f.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(floatingIPsRoot) - resp, err := f.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.FloatingIPs, resp, err -} - -// Get an individual floating IP. -func (f *FloatingIPsServiceOp) Get(ctx context.Context, ip string) (*FloatingIP, *Response, error) { - path := fmt.Sprintf("%s/%s", floatingBasePath, ip) - - req, err := f.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(floatingIPRoot) - resp, err := f.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.FloatingIP, resp, err -} - -// Create a floating IP. If the DropletID field of the request is not empty, -// the floating IP will also be assigned to the droplet. -func (f *FloatingIPsServiceOp) Create(ctx context.Context, createRequest *FloatingIPCreateRequest) (*FloatingIP, *Response, error) { - path := floatingBasePath - - req, err := f.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(floatingIPRoot) - resp, err := f.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.FloatingIP, resp, err -} - -// Delete a floating IP. 
-func (f *FloatingIPsServiceOp) Delete(ctx context.Context, ip string) (*Response, error) { - path := fmt.Sprintf("%s/%s", floatingBasePath, ip) - - req, err := f.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := f.client.Do(ctx, req, nil) - - return resp, err -} diff --git a/vendor/github.com/digitalocean/godo/floating_ips_actions.go b/vendor/github.com/digitalocean/godo/floating_ips_actions.go deleted file mode 100644 index 9fd6e0a..0000000 --- a/vendor/github.com/digitalocean/godo/floating_ips_actions.go +++ /dev/null @@ -1,109 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -// FloatingIPActionsService is an interface for interfacing with the -// floating IPs actions endpoints of the Digital Ocean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IP-Actions -type FloatingIPActionsService interface { - Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) - Unassign(ctx context.Context, ip string) (*Action, *Response, error) - Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) - List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) -} - -// FloatingIPActionsServiceOp handles communication with the floating IPs -// action related methods of the DigitalOcean API. -type FloatingIPActionsServiceOp struct { - client *Client -} - -// Assign a floating IP to a droplet. -func (s *FloatingIPActionsServiceOp) Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) { - request := &ActionRequest{ - "type": "assign", - "droplet_id": dropletID, - } - return s.doAction(ctx, ip, request) -} - -// Unassign a floating IP from the droplet it is currently assigned to. 
-func (s *FloatingIPActionsServiceOp) Unassign(ctx context.Context, ip string) (*Action, *Response, error) { - request := &ActionRequest{"type": "unassign"} - return s.doAction(ctx, ip, request) -} - -// Get an action for a particular floating IP by id. -func (s *FloatingIPActionsServiceOp) Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) { - path := fmt.Sprintf("%s/%d", floatingIPActionPath(ip), actionID) - return s.get(ctx, path) -} - -// List the actions for a particular floating IP. -func (s *FloatingIPActionsServiceOp) List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) { - path := floatingIPActionPath(ip) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.list(ctx, path) -} - -func (s *FloatingIPActionsServiceOp) doAction(ctx context.Context, ip string, request *ActionRequest) (*Action, *Response, error) { - path := floatingIPActionPath(ip) - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (s *FloatingIPActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (s *FloatingIPActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } 
- - return root.Actions, resp, err -} - -func floatingIPActionPath(ip string) string { - return fmt.Sprintf("%s/%s/actions", floatingBasePath, ip) -} diff --git a/vendor/github.com/digitalocean/godo/functions.go b/vendor/github.com/digitalocean/godo/functions.go deleted file mode 100644 index 61c8077..0000000 --- a/vendor/github.com/digitalocean/godo/functions.go +++ /dev/null @@ -1,236 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "time" -) - -const ( - functionsBasePath = "/v2/functions/namespaces" - functionsNamespacePath = functionsBasePath + "/%s" - functionsTriggerBasePath = functionsNamespacePath + "/triggers" -) - -type FunctionsService interface { - ListNamespaces(context.Context) ([]FunctionsNamespace, *Response, error) - GetNamespace(context.Context, string) (*FunctionsNamespace, *Response, error) - CreateNamespace(context.Context, *FunctionsNamespaceCreateRequest) (*FunctionsNamespace, *Response, error) - DeleteNamespace(context.Context, string) (*Response, error) - - ListTriggers(context.Context, string) ([]FunctionsTrigger, *Response, error) - GetTrigger(context.Context, string, string) (*FunctionsTrigger, *Response, error) - CreateTrigger(context.Context, string, *FunctionsTriggerCreateRequest) (*FunctionsTrigger, *Response, error) - UpdateTrigger(context.Context, string, string, *FunctionsTriggerUpdateRequest) (*FunctionsTrigger, *Response, error) - DeleteTrigger(context.Context, string, string) (*Response, error) -} - -type FunctionsServiceOp struct { - client *Client -} - -var _ FunctionsService = &FunctionsServiceOp{} - -type namespacesRoot struct { - Namespaces []FunctionsNamespace `json:"namespaces,omitempty"` -} - -type namespaceRoot struct { - Namespace *FunctionsNamespace `json:"namespace,omitempty"` -} - -type FunctionsNamespace struct { - ApiHost string `json:"api_host,omitempty"` - Namespace string `json:"namespace,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time 
`json:"updated_at,omitempty"` - Label string `json:"label,omitempty"` - Region string `json:"region,omitempty"` - UUID string `json:"uuid,omitempty"` - Key string `json:"key,omitempty"` -} - -type FunctionsNamespaceCreateRequest struct { - Label string `json:"label"` - Region string `json:"region"` -} - -type triggersRoot struct { - Triggers []FunctionsTrigger `json:"triggers,omitempty"` -} - -type triggerRoot struct { - Trigger *FunctionsTrigger `json:"trigger,omitempty"` -} - -type FunctionsTrigger struct { - Namespace string `json:"namespace,omitempty"` - Function string `json:"function,omitempty"` - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - IsEnabled bool `json:"is_enabled"` - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"` - ScheduledRuns *TriggerScheduledRuns `json:"scheduled_runs,omitempty"` -} - -type TriggerScheduledDetails struct { - Cron string `json:"cron,omitempty"` - Body map[string]interface{} `json:"body,omitempty"` -} - -type TriggerScheduledRuns struct { - LastRunAt time.Time `json:"last_run_at,omitempty"` - NextRunAt time.Time `json:"next_run_at,omitempty"` -} - -type FunctionsTriggerCreateRequest struct { - Name string `json:"name"` - Type string `json:"type"` - Function string `json:"function"` - IsEnabled bool `json:"is_enabled"` - ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"` -} - -type FunctionsTriggerUpdateRequest struct { - IsEnabled *bool `json:"is_enabled,omitempty"` - ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"` -} - -// Gets a list of namespaces -func (s *FunctionsServiceOp) ListNamespaces(ctx context.Context) ([]FunctionsNamespace, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, functionsBasePath, nil) - if err != nil { - return nil, nil, err - } - nsRoot := 
new(namespacesRoot) - resp, err := s.client.Do(ctx, req, nsRoot) - if err != nil { - return nil, resp, err - } - return nsRoot.Namespaces, resp, nil -} - -// Gets a single namespace -func (s *FunctionsServiceOp) GetNamespace(ctx context.Context, namespace string) (*FunctionsNamespace, *Response, error) { - path := fmt.Sprintf(functionsNamespacePath, namespace) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - nsRoot := new(namespaceRoot) - resp, err := s.client.Do(ctx, req, nsRoot) - if err != nil { - return nil, resp, err - } - return nsRoot.Namespace, resp, nil -} - -// Creates a namespace -func (s *FunctionsServiceOp) CreateNamespace(ctx context.Context, opts *FunctionsNamespaceCreateRequest) (*FunctionsNamespace, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodPost, functionsBasePath, opts) - if err != nil { - return nil, nil, err - } - nsRoot := new(namespaceRoot) - resp, err := s.client.Do(ctx, req, nsRoot) - if err != nil { - return nil, resp, err - } - return nsRoot.Namespace, resp, nil -} - -// Delete a namespace -func (s *FunctionsServiceOp) DeleteNamespace(ctx context.Context, namespace string) (*Response, error) { - path := fmt.Sprintf(functionsNamespacePath, namespace) - - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// ListTriggers gets a list of triggers -func (s *FunctionsServiceOp) ListTriggers(ctx context.Context, namespace string) ([]FunctionsTrigger, *Response, error) { - path := fmt.Sprintf(functionsTriggerBasePath, namespace) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(triggersRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Triggers, resp, nil -} - 
-// GetTrigger gets a single trigger -func (s *FunctionsServiceOp) GetTrigger(ctx context.Context, namespace string, trigger string) (*FunctionsTrigger, *Response, error) { - path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger) - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(triggerRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Trigger, resp, nil -} - -// CreateTrigger creates a trigger -func (s *FunctionsServiceOp) CreateTrigger(ctx context.Context, namespace string, opts *FunctionsTriggerCreateRequest) (*FunctionsTrigger, *Response, error) { - path := fmt.Sprintf(functionsTriggerBasePath, namespace) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts) - if err != nil { - return nil, nil, err - } - root := new(triggerRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Trigger, resp, nil -} - -// UpdateTrigger updates a trigger -func (s *FunctionsServiceOp) UpdateTrigger(ctx context.Context, namespace string, trigger string, opts *FunctionsTriggerUpdateRequest) (*FunctionsTrigger, *Response, error) { - path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger) - req, err := s.client.NewRequest(ctx, http.MethodPut, path, opts) - - if err != nil { - return nil, nil, err - } - root := new(triggerRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Trigger, resp, nil -} - -// DeleteTrigger deletes a trigger -func (s *FunctionsServiceOp) DeleteTrigger(ctx context.Context, namespace string, trigger string) (*Response, error) { - path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger) - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - - if err != nil { - return nil, err - } - resp, err := s.client.Do(ctx, req, nil) - if err != nil { - return 
resp, err - } - return resp, nil -} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go deleted file mode 100644 index 2ba06e5..0000000 --- a/vendor/github.com/digitalocean/godo/godo.go +++ /dev/null @@ -1,674 +0,0 @@ -package godo - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/go-querystring/query" - "github.com/hashicorp/go-retryablehttp" - "golang.org/x/oauth2" - "golang.org/x/time/rate" -) - -const ( - libraryVersion = "1.119.0" - defaultBaseURL = "https://api.digitalocean.com/" - userAgent = "godo/" + libraryVersion - mediaType = "application/json" - - headerRateLimit = "RateLimit-Limit" - headerRateRemaining = "RateLimit-Remaining" - headerRateReset = "RateLimit-Reset" - headerRequestID = "x-request-id" - internalHeaderRetryAttempts = "X-Godo-Retry-Attempts" - - defaultRetryMax = 4 - defaultRetryWaitMax = 30 - defaultRetryWaitMin = 1 -) - -// Client manages communication with DigitalOcean V2 API. -type Client struct { - // HTTP client used to communicate with the DO API. - HTTPClient *http.Client - - // Base URL for API requests. - BaseURL *url.URL - - // User agent for client - UserAgent string - - // Rate contains the current rate limit for the client as determined by the most recent - // API call. It is not thread-safe. Please consider using GetRate() instead. 
- Rate Rate - ratemtx sync.Mutex - - // Services used for communicating with the API - Account AccountService - Actions ActionsService - Apps AppsService - Balance BalanceService - BillingHistory BillingHistoryService - CDNs CDNService - Certificates CertificatesService - Databases DatabasesService - Domains DomainsService - Droplets DropletsService - DropletActions DropletActionsService - Firewalls FirewallsService - FloatingIPs FloatingIPsService - FloatingIPActions FloatingIPActionsService - Functions FunctionsService - Images ImagesService - ImageActions ImageActionsService - Invoices InvoicesService - Keys KeysService - Kubernetes KubernetesService - LoadBalancers LoadBalancersService - Monitoring MonitoringService - OneClick OneClickService - Projects ProjectsService - Regions RegionsService - Registry RegistryService - ReservedIPs ReservedIPsService - ReservedIPActions ReservedIPActionsService - Sizes SizesService - Snapshots SnapshotsService - Storage StorageService - StorageActions StorageActionsService - Tags TagsService - UptimeChecks UptimeChecksService - VPCs VPCsService - - // Optional function called after every successful request made to the DO APIs - onRequestCompleted RequestCompletionCallback - - // Optional extra HTTP headers to set on every request to the API. - headers map[string]string - - // Optional rate limiter to ensure QoS. - rateLimiter *rate.Limiter - - // Optional retry values. Setting the RetryConfig.RetryMax value enables automatically retrying requests - // that fail with 429 or 500-level response codes using the go-retryablehttp client - RetryConfig RetryConfig -} - -// RetryConfig sets the values used for enabling retries and backoffs for -// requests that fail with 429 or 500-level response codes using the go-retryablehttp client. -// RetryConfig.RetryMax must be configured to enable this behavior. RetryConfig.RetryWaitMin and -// RetryConfig.RetryWaitMax are optional, with the default values being 1.0 and 30.0, respectively. 
-// -// You can use -// -// godo.PtrTo(1.0) -// -// to explicitly set the RetryWaitMin and RetryWaitMax values. -// -// Note: Opting to use the go-retryablehttp client will overwrite any custom HTTP client passed into New(). -// Only the oauth2.TokenSource and Timeout will be maintained. -type RetryConfig struct { - RetryMax int - RetryWaitMin *float64 // Minimum time to wait - RetryWaitMax *float64 // Maximum time to wait - Logger interface{} // Customer logger instance. Must implement either go-retryablehttp.Logger or go-retryablehttp.LeveledLogger -} - -// RequestCompletionCallback defines the type of the request callback function -type RequestCompletionCallback func(*http.Request, *http.Response) - -// ListOptions specifies the optional parameters to various List methods that -// support pagination. -type ListOptions struct { - // For paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty"` - - // For paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty"` - - // Whether App responses should include project_id fields. The field will be empty if false or if omitted. (ListApps) - WithProjects bool `url:"with_projects,omitempty"` -} - -// TokenListOptions specifies the optional parameters to various List methods that support token pagination. -type TokenListOptions struct { - // For paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty"` - - // For paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty"` - - // For paginated result sets which support tokens, the token provided by the last set - // of results in order to retrieve the next set of results. This is expected to be faster - // than incrementing or decrementing the page number. - Token string `url:"page_token,omitempty"` -} - -// Response is a DigitalOcean response. This wraps the standard http.Response returned from DigitalOcean. 
-type Response struct { - *http.Response - - // Links that were returned with the response. These are parsed from - // request body and not the header. - Links *Links - - // Meta describes generic information about the response. - Meta *Meta - - // Monitoring URI - // Deprecated: This field is not populated. To poll for the status of a - // newly created Droplet, use Links.Actions[0].HREF - Monitor string - - Rate -} - -// An ErrorResponse reports the error caused by an API request -type ErrorResponse struct { - // HTTP response that caused this error - Response *http.Response - - // Error message - Message string `json:"message"` - - // RequestID returned from the API, useful to contact support. - RequestID string `json:"request_id"` - - // Attempts is the number of times the request was attempted when retries are enabled. - Attempts int -} - -// Rate contains the rate limit for the current client. -type Rate struct { - // The number of request per hour the client is currently limited to. - Limit int `json:"limit"` - - // The number of remaining requests the client can make this hour. - Remaining int `json:"remaining"` - - // The time at which the current rate limit will reset. - Reset Timestamp `json:"reset"` -} - -func addOptions(s string, opt interface{}) (string, error) { - v := reflect.ValueOf(opt) - - if v.Kind() == reflect.Ptr && v.IsNil() { - return s, nil - } - - origURL, err := url.Parse(s) - if err != nil { - return s, err - } - - origValues := origURL.Query() - - newValues, err := query.Values(opt) - if err != nil { - return s, err - } - - for k, v := range newValues { - origValues[k] = v - } - - origURL.RawQuery = origValues.Encode() - return origURL.String(), nil -} - -// NewFromToken returns a new DigitalOcean API client with the given API -// token. 
-func NewFromToken(token string) *Client { - cleanToken := strings.Trim(strings.TrimSpace(token), "'") - ctx := context.Background() - ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cleanToken}) - - oauthClient := oauth2.NewClient(ctx, ts) - client, err := New(oauthClient, WithRetryAndBackoffs( - RetryConfig{ - RetryMax: defaultRetryMax, - RetryWaitMin: PtrTo(float64(defaultRetryWaitMin)), - RetryWaitMax: PtrTo(float64(defaultRetryWaitMax)), - }, - )) - if err != nil { - panic(err) - } - - return client -} - -// NewClient returns a new DigitalOcean API client, using the given -// http.Client to perform all requests. -// -// Users who wish to pass their own http.Client should use this method. If -// you're in need of further customization, the godo.New method allows more -// options, such as setting a custom URL or a custom user agent string. -func NewClient(httpClient *http.Client) *Client { - if httpClient == nil { - httpClient = http.DefaultClient - } - - baseURL, _ := url.Parse(defaultBaseURL) - - c := &Client{HTTPClient: httpClient, BaseURL: baseURL, UserAgent: userAgent} - - c.Account = &AccountServiceOp{client: c} - c.Actions = &ActionsServiceOp{client: c} - c.Apps = &AppsServiceOp{client: c} - c.Balance = &BalanceServiceOp{client: c} - c.BillingHistory = &BillingHistoryServiceOp{client: c} - c.CDNs = &CDNServiceOp{client: c} - c.Certificates = &CertificatesServiceOp{client: c} - c.Databases = &DatabasesServiceOp{client: c} - c.Domains = &DomainsServiceOp{client: c} - c.Droplets = &DropletsServiceOp{client: c} - c.DropletActions = &DropletActionsServiceOp{client: c} - c.Firewalls = &FirewallsServiceOp{client: c} - c.FloatingIPs = &FloatingIPsServiceOp{client: c} - c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c} - c.Functions = &FunctionsServiceOp{client: c} - c.Images = &ImagesServiceOp{client: c} - c.ImageActions = &ImageActionsServiceOp{client: c} - c.Invoices = &InvoicesServiceOp{client: c} - c.Keys = &KeysServiceOp{client: c} - 
c.Kubernetes = &KubernetesServiceOp{client: c} - c.LoadBalancers = &LoadBalancersServiceOp{client: c} - c.Monitoring = &MonitoringServiceOp{client: c} - c.OneClick = &OneClickServiceOp{client: c} - c.Projects = &ProjectsServiceOp{client: c} - c.Regions = &RegionsServiceOp{client: c} - c.Registry = &RegistryServiceOp{client: c} - c.ReservedIPs = &ReservedIPsServiceOp{client: c} - c.ReservedIPActions = &ReservedIPActionsServiceOp{client: c} - c.Sizes = &SizesServiceOp{client: c} - c.Snapshots = &SnapshotsServiceOp{client: c} - c.Storage = &StorageServiceOp{client: c} - c.StorageActions = &StorageActionsServiceOp{client: c} - c.Tags = &TagsServiceOp{client: c} - c.UptimeChecks = &UptimeChecksServiceOp{client: c} - c.VPCs = &VPCsServiceOp{client: c} - - c.headers = make(map[string]string) - - return c -} - -// ClientOpt are options for New. -type ClientOpt func(*Client) error - -// New returns a new DigitalOcean API client instance. -func New(httpClient *http.Client, opts ...ClientOpt) (*Client, error) { - c := NewClient(httpClient) - for _, opt := range opts { - if err := opt(c); err != nil { - return nil, err - } - } - - // if retryMax is set it will use the retryablehttp client. - if c.RetryConfig.RetryMax > 0 { - retryableClient := retryablehttp.NewClient() - retryableClient.RetryMax = c.RetryConfig.RetryMax - - if c.RetryConfig.RetryWaitMin != nil { - retryableClient.RetryWaitMin = time.Duration(*c.RetryConfig.RetryWaitMin * float64(time.Second)) - } - if c.RetryConfig.RetryWaitMax != nil { - retryableClient.RetryWaitMax = time.Duration(*c.RetryConfig.RetryWaitMax * float64(time.Second)) - } - - // By default this is nil and does not log. 
- retryableClient.Logger = c.RetryConfig.Logger - - // if timeout is set, it is maintained before overwriting client with StandardClient() - retryableClient.HTTPClient.Timeout = c.HTTPClient.Timeout - - // This custom ErrorHandler is required to provide errors that are consistent - // with a *godo.ErrorResponse and a non-nil *godo.Response while providing - // insight into retries using an internal header. - retryableClient.ErrorHandler = func(resp *http.Response, err error, numTries int) (*http.Response, error) { - if resp != nil { - resp.Header.Add(internalHeaderRetryAttempts, strconv.Itoa(numTries)) - - return resp, err - } - - return resp, err - } - - retryableClient.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { - // In addition to the default retry policy, we also retry HTTP/2 INTERNAL_ERROR errors. - // See: https://github.com/golang/go/issues/51323 - if err != nil && strings.Contains(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") { - return true, nil - } - - return retryablehttp.DefaultRetryPolicy(ctx, resp, err) - } - - var source *oauth2.Transport - if _, ok := c.HTTPClient.Transport.(*oauth2.Transport); ok { - source = c.HTTPClient.Transport.(*oauth2.Transport) - } - c.HTTPClient = retryableClient.StandardClient() - c.HTTPClient.Transport = &oauth2.Transport{ - Base: c.HTTPClient.Transport, - Source: source.Source, - } - - } - - return c, nil -} - -// SetBaseURL is a client option for setting the base URL. -func SetBaseURL(bu string) ClientOpt { - return func(c *Client) error { - u, err := url.Parse(bu) - if err != nil { - return err - } - - c.BaseURL = u - return nil - } -} - -// SetUserAgent is a client option for setting the user agent. 
-func SetUserAgent(ua string) ClientOpt { - return func(c *Client) error { - c.UserAgent = fmt.Sprintf("%s %s", ua, c.UserAgent) - return nil - } -} - -// SetRequestHeaders sets optional HTTP headers on the client that are -// sent on each HTTP request. -func SetRequestHeaders(headers map[string]string) ClientOpt { - return func(c *Client) error { - for k, v := range headers { - c.headers[k] = v - } - return nil - } -} - -// SetStaticRateLimit sets an optional client-side rate limiter that restricts -// the number of queries per second that the client can send to enforce QoS. -func SetStaticRateLimit(rps float64) ClientOpt { - return func(c *Client) error { - c.rateLimiter = rate.NewLimiter(rate.Limit(rps), 1) - return nil - } -} - -// WithRetryAndBackoffs sets retry values. Setting the RetryConfig.RetryMax value enables automatically retrying requests -// that fail with 429 or 500-level response codes using the go-retryablehttp client -func WithRetryAndBackoffs(retryConfig RetryConfig) ClientOpt { - return func(c *Client) error { - c.RetryConfig.RetryMax = retryConfig.RetryMax - c.RetryConfig.RetryWaitMax = retryConfig.RetryWaitMax - c.RetryConfig.RetryWaitMin = retryConfig.RetryWaitMin - c.RetryConfig.Logger = retryConfig.Logger - return nil - } -} - -// NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the -// BaseURL of the Client. Relative URLS should always be specified without a preceding slash. If specified, the -// value pointed to by body is JSON encoded and included in as the request body. 
-func (c *Client) NewRequest(ctx context.Context, method, urlStr string, body interface{}) (*http.Request, error) { - u, err := c.BaseURL.Parse(urlStr) - if err != nil { - return nil, err - } - - var req *http.Request - switch method { - case http.MethodGet, http.MethodHead, http.MethodOptions: - req, err = http.NewRequest(method, u.String(), nil) - if err != nil { - return nil, err - } - - default: - buf := new(bytes.Buffer) - if body != nil { - err = json.NewEncoder(buf).Encode(body) - if err != nil { - return nil, err - } - } - - req, err = http.NewRequest(method, u.String(), buf) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", mediaType) - } - - for k, v := range c.headers { - req.Header.Add(k, v) - } - - req.Header.Set("Accept", mediaType) - req.Header.Set("User-Agent", c.UserAgent) - - return req, nil -} - -// OnRequestCompleted sets the DO API request completion callback -func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) { - c.onRequestCompleted = rc -} - -// GetRate returns the current rate limit for the client as determined by the most recent -// API call. It is thread-safe. -func (c *Client) GetRate() Rate { - c.ratemtx.Lock() - defer c.ratemtx.Unlock() - return c.Rate -} - -// newResponse creates a new Response for the provided http.Response -func newResponse(r *http.Response) *Response { - response := Response{Response: r} - response.populateRate() - - return &response -} - -// populateRate parses the rate related headers and populates the response Rate. 
-func (r *Response) populateRate() { - if limit := r.Header.Get(headerRateLimit); limit != "" { - r.Rate.Limit, _ = strconv.Atoi(limit) - } - if remaining := r.Header.Get(headerRateRemaining); remaining != "" { - r.Rate.Remaining, _ = strconv.Atoi(remaining) - } - if reset := r.Header.Get(headerRateReset); reset != "" { - if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { - r.Rate.Reset = Timestamp{time.Unix(v, 0)} - } - } -} - -// Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value -// pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface, -// the raw response will be written to v, without attempting to decode it. -func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) { - if c.rateLimiter != nil { - err := c.rateLimiter.Wait(ctx) - if err != nil { - return nil, err - } - } - - resp, err := DoRequestWithClient(ctx, c.HTTPClient, req) - if err != nil { - return nil, err - } - if c.onRequestCompleted != nil { - c.onRequestCompleted(req, resp) - } - - defer func() { - // Ensure the response body is fully read and closed - // before we reconnect, so that we reuse the same TCPConnection. - // Close the previous response's body. But read at least some of - // the body so if it's small the underlying TCP connection will be - // re-used. No need to check for errors: if it fails, the Transport - // won't reuse it anyway. 
- const maxBodySlurpSize = 2 << 10 - if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { - io.CopyN(io.Discard, resp.Body, maxBodySlurpSize) - } - - if rerr := resp.Body.Close(); err == nil { - err = rerr - } - }() - - response := newResponse(resp) - c.ratemtx.Lock() - c.Rate = response.Rate - c.ratemtx.Unlock() - - err = CheckResponse(resp) - if err != nil { - return response, err - } - - if resp.StatusCode != http.StatusNoContent && v != nil { - if w, ok := v.(io.Writer); ok { - _, err = io.Copy(w, resp.Body) - if err != nil { - return nil, err - } - } else { - err = json.NewDecoder(resp.Body).Decode(v) - if err != nil { - return nil, err - } - } - } - - return response, err -} - -// DoRequest submits an HTTP request. -func DoRequest(ctx context.Context, req *http.Request) (*http.Response, error) { - return DoRequestWithClient(ctx, http.DefaultClient, req) -} - -// DoRequestWithClient submits an HTTP request using the specified client. -func DoRequestWithClient( - ctx context.Context, - client *http.Client, - req *http.Request) (*http.Response, error) { - req = req.WithContext(ctx) - return client.Do(req) -} - -func (r *ErrorResponse) Error() string { - var attempted string - if r.Attempts > 0 { - attempted = fmt.Sprintf("; giving up after %d attempt(s)", r.Attempts) - } - - if r.RequestID != "" { - return fmt.Sprintf("%v %v: %d (request %q) %v%s", - r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.RequestID, r.Message, attempted) - } - return fmt.Sprintf("%v %v: %d %v%s", - r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Message, attempted) -} - -// CheckResponse checks the API response for errors, and returns them if present. A response is considered an -// error if it has a status code outside the 200 range. API error responses are expected to have either no response -// body, or a JSON response body that maps to ErrorResponse. Any other response body will be silently ignored. 
-// If the API error response does not include the request ID in its body, the one from its header will be used. -func CheckResponse(r *http.Response) error { - if c := r.StatusCode; c >= 200 && c <= 299 { - return nil - } - - errorResponse := &ErrorResponse{Response: r} - data, err := io.ReadAll(r.Body) - if err == nil && len(data) > 0 { - err := json.Unmarshal(data, errorResponse) - if err != nil { - errorResponse.Message = string(data) - } - } - - if errorResponse.RequestID == "" { - errorResponse.RequestID = r.Header.Get(headerRequestID) - } - - attempts, strconvErr := strconv.Atoi(r.Header.Get(internalHeaderRetryAttempts)) - if strconvErr == nil { - errorResponse.Attempts = attempts - } - - return errorResponse -} - -func (r Rate) String() string { - return Stringify(r) -} - -// PtrTo returns a pointer to the provided input. -func PtrTo[T any](v T) *T { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -// -// Deprecated: Use PtrTo instead. -func String(v string) *string { - p := new(string) - *p = v - return p -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -// -// Deprecated: Use PtrTo instead. -func Int(v int) *int { - p := new(int) - *p = v - return p -} - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -// -// Deprecated: Use PtrTo instead. 
-func Bool(v bool) *bool { - p := new(bool) - *p = v - return p -} - -// StreamToString converts a reader to a string -func StreamToString(stream io.Reader) string { - buf := new(bytes.Buffer) - _, _ = buf.ReadFrom(stream) - return buf.String() -} diff --git a/vendor/github.com/digitalocean/godo/image_actions.go b/vendor/github.com/digitalocean/godo/image_actions.go deleted file mode 100644 index 2ee508c..0000000 --- a/vendor/github.com/digitalocean/godo/image_actions.go +++ /dev/null @@ -1,117 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "net/url" -) - -// ImageActionsService is an interface for interfacing with the image actions -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Image-Actions -type ImageActionsService interface { - Get(context.Context, int, int) (*Action, *Response, error) - GetByURI(context.Context, string) (*Action, *Response, error) - Transfer(context.Context, int, *ActionRequest) (*Action, *Response, error) - Convert(context.Context, int) (*Action, *Response, error) -} - -// ImageActionsServiceOp handles communication with the image action related methods of the -// DigitalOcean API. 
-type ImageActionsServiceOp struct { - client *Client -} - -var _ ImageActionsService = &ImageActionsServiceOp{} - -// Transfer an image -func (i *ImageActionsServiceOp) Transfer(ctx context.Context, imageID int, transferRequest *ActionRequest) (*Action, *Response, error) { - if imageID < 1 { - return nil, nil, NewArgError("imageID", "cannot be less than 1") - } - - if transferRequest == nil { - return nil, nil, NewArgError("transferRequest", "cannot be nil") - } - - path := fmt.Sprintf("v2/images/%d/actions", imageID) - - req, err := i.client.NewRequest(ctx, http.MethodPost, path, transferRequest) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := i.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -// Convert an image to a snapshot -func (i *ImageActionsServiceOp) Convert(ctx context.Context, imageID int) (*Action, *Response, error) { - if imageID < 1 { - return nil, nil, NewArgError("imageID", "cannont be less than 1") - } - - path := fmt.Sprintf("v2/images/%d/actions", imageID) - - convertRequest := &ActionRequest{ - "type": "convert", - } - - req, err := i.client.NewRequest(ctx, http.MethodPost, path, convertRequest) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := i.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -// Get an action for a particular image by id. -func (i *ImageActionsServiceOp) Get(ctx context.Context, imageID, actionID int) (*Action, *Response, error) { - if imageID < 1 { - return nil, nil, NewArgError("imageID", "cannot be less than 1") - } - - if actionID < 1 { - return nil, nil, NewArgError("actionID", "cannot be less than 1") - } - - path := fmt.Sprintf("v2/images/%d/actions/%d", imageID, actionID) - return i.get(ctx, path) -} - -// GetByURI gets an action for a particular image by URI. 
-func (i *ImageActionsServiceOp) GetByURI(ctx context.Context, rawurl string) (*Action, *Response, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, nil, err - } - - return i.get(ctx, u.Path) -} - -func (i *ImageActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { - req, err := i.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := i.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/images.go b/vendor/github.com/digitalocean/godo/images.go deleted file mode 100644 index 5db3747..0000000 --- a/vendor/github.com/digitalocean/godo/images.go +++ /dev/null @@ -1,248 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const imageBasePath = "v2/images" - -// ImagesService is an interface for interfacing with the images -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Images -type ImagesService interface { - List(context.Context, *ListOptions) ([]Image, *Response, error) - ListDistribution(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) - ListApplication(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) - ListUser(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) - ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Image, *Response, error) - GetByID(context.Context, int) (*Image, *Response, error) - GetBySlug(context.Context, string) (*Image, *Response, error) - Create(context.Context, *CustomImageCreateRequest) (*Image, *Response, error) - Update(context.Context, int, *ImageUpdateRequest) (*Image, *Response, error) - Delete(context.Context, int) (*Response, error) -} - -// ImagesServiceOp handles communication with the image related methods of the -// DigitalOcean API. 
-type ImagesServiceOp struct { - client *Client -} - -var _ ImagesService = &ImagesServiceOp{} - -// Image represents a DigitalOcean Image -type Image struct { - ID int `json:"id,float64,omitempty"` - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - Distribution string `json:"distribution,omitempty"` - Slug string `json:"slug,omitempty"` - Public bool `json:"public,omitempty"` - Regions []string `json:"regions,omitempty"` - MinDiskSize int `json:"min_disk_size,omitempty"` - SizeGigaBytes float64 `json:"size_gigabytes,omitempty"` - Created string `json:"created_at,omitempty"` - Description string `json:"description,omitempty"` - Tags []string `json:"tags,omitempty"` - Status string `json:"status,omitempty"` - ErrorMessage string `json:"error_message,omitempty"` -} - -// ImageUpdateRequest represents a request to update an image. -type ImageUpdateRequest struct { - Name string `json:"name,omitempty"` - Distribution string `json:"distribution,omitempty"` - Description string `json:"description,omitempty"` -} - -// CustomImageCreateRequest represents a request to create a custom image. -type CustomImageCreateRequest struct { - Name string `json:"name"` - Url string `json:"url"` - Region string `json:"region"` - Distribution string `json:"distribution,omitempty"` - Description string `json:"description,omitempty"` - Tags []string `json:"tags,omitempty"` -} - -type imageRoot struct { - Image *Image -} - -type imagesRoot struct { - Images []Image - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type listImageOptions struct { - Private bool `url:"private,omitempty"` - Type string `url:"type,omitempty"` - Tag string `url:"tag_name,omitempty"` -} - -func (i Image) String() string { - return Stringify(i) -} - -// List lists all the images available. 
-func (s *ImagesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { - return s.list(ctx, opt, nil) -} - -// ListDistribution lists all the distribution images. -func (s *ImagesServiceOp) ListDistribution(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { - listOpt := listImageOptions{Type: "distribution"} - return s.list(ctx, opt, &listOpt) -} - -// ListApplication lists all the application images. -func (s *ImagesServiceOp) ListApplication(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { - listOpt := listImageOptions{Type: "application"} - return s.list(ctx, opt, &listOpt) -} - -// ListUser lists all the user images. -func (s *ImagesServiceOp) ListUser(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { - listOpt := listImageOptions{Private: true} - return s.list(ctx, opt, &listOpt) -} - -// ListByTag lists all images with a specific tag applied. -func (s *ImagesServiceOp) ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Image, *Response, error) { - listOpt := listImageOptions{Tag: tag} - return s.list(ctx, opt, &listOpt) -} - -// GetByID retrieves an image by id. -func (s *ImagesServiceOp) GetByID(ctx context.Context, imageID int) (*Image, *Response, error) { - if imageID < 1 { - return nil, nil, NewArgError("imageID", "cannot be less than 1") - } - - return s.get(ctx, interface{}(imageID)) -} - -// GetBySlug retrieves an image by slug. 
-func (s *ImagesServiceOp) GetBySlug(ctx context.Context, slug string) (*Image, *Response, error) { - if len(slug) < 1 { - return nil, nil, NewArgError("slug", "cannot be blank") - } - - return s.get(ctx, interface{}(slug)) -} - -// Create a new image -func (s *ImagesServiceOp) Create(ctx context.Context, createRequest *CustomImageCreateRequest) (*Image, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - req, err := s.client.NewRequest(ctx, http.MethodPost, imageBasePath, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(imageRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Image, resp, err -} - -// Update an image name. -func (s *ImagesServiceOp) Update(ctx context.Context, imageID int, updateRequest *ImageUpdateRequest) (*Image, *Response, error) { - if imageID < 1 { - return nil, nil, NewArgError("imageID", "cannot be less than 1") - } - - if updateRequest == nil { - return nil, nil, NewArgError("updateRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%d", imageBasePath, imageID) - req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest) - if err != nil { - return nil, nil, err - } - - root := new(imageRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Image, resp, err -} - -// Delete an image. 
-func (s *ImagesServiceOp) Delete(ctx context.Context, imageID int) (*Response, error) { - if imageID < 1 { - return nil, NewArgError("imageID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", imageBasePath, imageID) - - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// Helper method for getting an individual image -func (s *ImagesServiceOp) get(ctx context.Context, ID interface{}) (*Image, *Response, error) { - path := fmt.Sprintf("%s/%v", imageBasePath, ID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(imageRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Image, resp, err -} - -// Helper method for listing images -func (s *ImagesServiceOp) list(ctx context.Context, opt *ListOptions, listOpt *listImageOptions) ([]Image, *Response, error) { - path := imageBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - path, err = addOptions(path, listOpt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(imagesRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Images, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/invoices.go b/vendor/github.com/digitalocean/godo/invoices.go deleted file mode 100644 index 39bffbc..0000000 --- a/vendor/github.com/digitalocean/godo/invoices.go +++ /dev/null @@ -1,226 +0,0 @@ -package godo - -import ( - "bytes" - "context" - "fmt" - "net/http" - "time" -) - -const invoicesBasePath = "v2/customers/my/invoices" - -// 
InvoicesService is an interface for interfacing with the Invoice -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Billing -type InvoicesService interface { - Get(context.Context, string, *ListOptions) (*Invoice, *Response, error) - GetPDF(context.Context, string) ([]byte, *Response, error) - GetCSV(context.Context, string) ([]byte, *Response, error) - List(context.Context, *ListOptions) (*InvoiceList, *Response, error) - GetSummary(context.Context, string) (*InvoiceSummary, *Response, error) -} - -// InvoicesServiceOp handles communication with the Invoice related methods of -// the DigitalOcean API. -type InvoicesServiceOp struct { - client *Client -} - -var _ InvoicesService = &InvoicesServiceOp{} - -// Invoice represents a DigitalOcean Invoice -type Invoice struct { - InvoiceItems []InvoiceItem `json:"invoice_items"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// InvoiceItem represents a line-item on a DigitalOcean Invoice -type InvoiceItem struct { - Product string `json:"product"` - ResourceID string `json:"resource_id"` - ResourceUUID string `json:"resource_uuid"` - GroupDescription string `json:"group_description"` - Description string `json:"description"` - Amount string `json:"amount"` - Duration string `json:"duration"` - DurationUnit string `json:"duration_unit"` - StartTime time.Time `json:"start_time"` - EndTime time.Time `json:"end_time"` - ProjectName string `json:"project_name"` - Category string `json:"category"` -} - -// InvoiceList contains a paginated list of all of a customer's invoices. -// The InvoicePreview is the month-to-date usage generated by DigitalOcean. -type InvoiceList struct { - Invoices []InvoiceListItem `json:"invoices"` - InvoicePreview InvoiceListItem `json:"invoice_preview"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// InvoiceListItem contains a small list of information about a customer's invoice. 
-// More information can be found in the Invoice or InvoiceSummary -type InvoiceListItem struct { - InvoiceUUID string `json:"invoice_uuid"` - Amount string `json:"amount"` - InvoicePeriod string `json:"invoice_period"` - UpdatedAt time.Time `json:"updated_at"` -} - -// InvoiceSummary contains metadata and summarized usage for an invoice generated by DigitalOcean -type InvoiceSummary struct { - InvoiceUUID string `json:"invoice_uuid"` - BillingPeriod string `json:"billing_period"` - Amount string `json:"amount"` - UserName string `json:"user_name"` - UserBillingAddress Address `json:"user_billing_address"` - UserCompany string `json:"user_company"` - UserEmail string `json:"user_email"` - ProductCharges InvoiceSummaryBreakdown `json:"product_charges"` - Overages InvoiceSummaryBreakdown `json:"overages"` - Taxes InvoiceSummaryBreakdown `json:"taxes"` - CreditsAndAdjustments InvoiceSummaryBreakdown `json:"credits_and_adjustments"` -} - -// Address represents the billing address of a customer -type Address struct { - AddressLine1 string `json:"address_line1"` - AddressLine2 string `json:"address_line2"` - City string `json:"city"` - Region string `json:"region"` - PostalCode string `json:"postal_code"` - CountryISO2Code string `json:"country_iso2_code"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// InvoiceSummaryBreakdown is a grouped set of InvoiceItems from an invoice -type InvoiceSummaryBreakdown struct { - Name string `json:"name"` - Amount string `json:"amount"` - Items []InvoiceSummaryBreakdownItem `json:"items"` -} - -// InvoiceSummaryBreakdownItem further breaks down the InvoiceSummary by product -type InvoiceSummaryBreakdownItem struct { - Name string `json:"name"` - Amount string `json:"amount"` - Count string `json:"count"` -} - -func (i Invoice) String() string { - return Stringify(i) -} - -// Get detailed invoice items for an Invoice -func (s *InvoicesServiceOp) Get(ctx context.Context, invoiceUUID string, 
opt *ListOptions) (*Invoice, *Response, error) { - path := fmt.Sprintf("%s/%s", invoicesBasePath, invoiceUUID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(Invoice) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root, resp, err -} - -// List invoices for a customer -func (s *InvoicesServiceOp) List(ctx context.Context, opt *ListOptions) (*InvoiceList, *Response, error) { - path := invoicesBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(InvoiceList) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root, resp, err -} - -// GetSummary returns a summary of metadata and summarized usage for an Invoice -func (s *InvoicesServiceOp) GetSummary(ctx context.Context, invoiceUUID string) (*InvoiceSummary, *Response, error) { - path := fmt.Sprintf("%s/%s/summary", invoicesBasePath, invoiceUUID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(InvoiceSummary) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root, resp, err -} - -// GetPDF returns the pdf for an Invoice -func (s *InvoicesServiceOp) GetPDF(ctx context.Context, invoiceUUID string) ([]byte, *Response, error) { - path := fmt.Sprintf("%s/%s/pdf", invoicesBasePath, invoiceUUID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if 
err != nil { - return nil, nil, err - } - - var root bytes.Buffer - resp, err := s.client.Do(ctx, req, &root) - if err != nil { - return nil, resp, err - } - - return root.Bytes(), resp, err -} - -// GetCSV returns the csv for an Invoice -func (s *InvoicesServiceOp) GetCSV(ctx context.Context, invoiceUUID string) ([]byte, *Response, error) { - path := fmt.Sprintf("%s/%s/csv", invoicesBasePath, invoiceUUID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - var root bytes.Buffer - resp, err := s.client.Do(ctx, req, &root) - if err != nil { - return nil, resp, err - } - - return root.Bytes(), resp, err -} diff --git a/vendor/github.com/digitalocean/godo/keys.go b/vendor/github.com/digitalocean/godo/keys.go deleted file mode 100644 index cd0bd29..0000000 --- a/vendor/github.com/digitalocean/godo/keys.go +++ /dev/null @@ -1,230 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const keysBasePath = "v2/account/keys" - -// KeysService is an interface for interfacing with the SSH keys -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys -type KeysService interface { - List(context.Context, *ListOptions) ([]Key, *Response, error) - GetByID(context.Context, int) (*Key, *Response, error) - GetByFingerprint(context.Context, string) (*Key, *Response, error) - Create(context.Context, *KeyCreateRequest) (*Key, *Response, error) - UpdateByID(context.Context, int, *KeyUpdateRequest) (*Key, *Response, error) - UpdateByFingerprint(context.Context, string, *KeyUpdateRequest) (*Key, *Response, error) - DeleteByID(context.Context, int) (*Response, error) - DeleteByFingerprint(context.Context, string) (*Response, error) -} - -// KeysServiceOp handles communication with SSH key related method of the -// DigitalOcean API. 
-type KeysServiceOp struct { - client *Client -} - -var _ KeysService = &KeysServiceOp{} - -// Key represents a DigitalOcean Key. -type Key struct { - ID int `json:"id,float64,omitempty"` - Name string `json:"name,omitempty"` - Fingerprint string `json:"fingerprint,omitempty"` - PublicKey string `json:"public_key,omitempty"` -} - -// KeyUpdateRequest represents a request to update an SSH key stored in a DigitalOcean account. -type KeyUpdateRequest struct { - Name string `json:"name"` -} - -type keysRoot struct { - SSHKeys []Key `json:"ssh_keys"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type keyRoot struct { - SSHKey *Key `json:"ssh_key"` -} - -func (s Key) String() string { - return Stringify(s) -} - -// KeyCreateRequest represents a request to create a new SSH key. -type KeyCreateRequest struct { - Name string `json:"name"` - PublicKey string `json:"public_key"` -} - -// List all SSH keys -func (s *KeysServiceOp) List(ctx context.Context, opt *ListOptions) ([]Key, *Response, error) { - path := keysBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(keysRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.SSHKeys, resp, err -} - -// Performs a get given a path -func (s *KeysServiceOp) get(ctx context.Context, path string) (*Key, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(keyRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.SSHKey, resp, err -} - -// GetByID gets an SSH key by its ID -func (s *KeysServiceOp) GetByID(ctx context.Context, keyID int) (*Key, 
*Response, error) { - if keyID < 1 { - return nil, nil, NewArgError("keyID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", keysBasePath, keyID) - return s.get(ctx, path) -} - -// GetByFingerprint gets an SSH key by its fingerprint -func (s *KeysServiceOp) GetByFingerprint(ctx context.Context, fingerprint string) (*Key, *Response, error) { - if len(fingerprint) < 1 { - return nil, nil, NewArgError("fingerprint", "cannot not be empty") - } - - path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint) - return s.get(ctx, path) -} - -// Create an SSH key using a KeyCreateRequest -func (s *KeysServiceOp) Create(ctx context.Context, createRequest *KeyCreateRequest) (*Key, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - req, err := s.client.NewRequest(ctx, http.MethodPost, keysBasePath, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(keyRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.SSHKey, resp, err -} - -// UpdateByID updates an SSH key name by ID. -func (s *KeysServiceOp) UpdateByID(ctx context.Context, keyID int, updateRequest *KeyUpdateRequest) (*Key, *Response, error) { - if keyID < 1 { - return nil, nil, NewArgError("keyID", "cannot be less than 1") - } - - if updateRequest == nil { - return nil, nil, NewArgError("updateRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%d", keysBasePath, keyID) - req, err := s.client.NewRequest(ctx, "PUT", path, updateRequest) - if err != nil { - return nil, nil, err - } - - root := new(keyRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.SSHKey, resp, err -} - -// UpdateByFingerprint updates an SSH key name by fingerprint. 
-func (s *KeysServiceOp) UpdateByFingerprint(ctx context.Context, fingerprint string, updateRequest *KeyUpdateRequest) (*Key, *Response, error) { - if len(fingerprint) < 1 { - return nil, nil, NewArgError("fingerprint", "cannot be empty") - } - - if updateRequest == nil { - return nil, nil, NewArgError("updateRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint) - req, err := s.client.NewRequest(ctx, "PUT", path, updateRequest) - if err != nil { - return nil, nil, err - } - - root := new(keyRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.SSHKey, resp, err -} - -// Delete an SSH key using a path -func (s *KeysServiceOp) delete(ctx context.Context, path string) (*Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// DeleteByID deletes an SSH key by its id -func (s *KeysServiceOp) DeleteByID(ctx context.Context, keyID int) (*Response, error) { - if keyID < 1 { - return nil, NewArgError("keyID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", keysBasePath, keyID) - return s.delete(ctx, path) -} - -// DeleteByFingerprint deletes an SSH key by its fingerprint -func (s *KeysServiceOp) DeleteByFingerprint(ctx context.Context, fingerprint string) (*Response, error) { - if len(fingerprint) < 1 { - return nil, NewArgError("fingerprint", "cannot be empty") - } - - path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint) - return s.delete(ctx, path) -} diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go deleted file mode 100644 index 8ef9d24..0000000 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ /dev/null @@ -1,980 +0,0 @@ -package godo - -import ( - "bytes" - "context" - "encoding" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strconv" - 
"strings" - "time" -) - -const ( - kubernetesBasePath = "/v2/kubernetes" - kubernetesClustersPath = kubernetesBasePath + "/clusters" - kubernetesOptionsPath = kubernetesBasePath + "/options" -) - -// KubernetesService is an interface for interfacing with the Kubernetes endpoints -// of the DigitalOcean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Kubernetes -type KubernetesService interface { - Create(context.Context, *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error) - Get(context.Context, string) (*KubernetesCluster, *Response, error) - GetUser(context.Context, string) (*KubernetesClusterUser, *Response, error) - GetUpgrades(context.Context, string) ([]*KubernetesVersion, *Response, error) - GetKubeConfig(context.Context, string) (*KubernetesClusterConfig, *Response, error) - GetKubeConfigWithExpiry(context.Context, string, int64) (*KubernetesClusterConfig, *Response, error) - GetCredentials(context.Context, string, *KubernetesClusterCredentialsGetRequest) (*KubernetesClusterCredentials, *Response, error) - List(context.Context, *ListOptions) ([]*KubernetesCluster, *Response, error) - Update(context.Context, string, *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error) - Upgrade(context.Context, string, *KubernetesClusterUpgradeRequest) (*Response, error) - Delete(context.Context, string) (*Response, error) - DeleteSelective(context.Context, string, *KubernetesClusterDeleteSelectiveRequest) (*Response, error) - DeleteDangerous(context.Context, string) (*Response, error) - ListAssociatedResourcesForDeletion(context.Context, string) (*KubernetesAssociatedResources, *Response, error) - - CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error) - GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error) - ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) 
([]*KubernetesNodePool, *Response, error) - UpdateNodePool(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error) - // RecycleNodePoolNodes is DEPRECATED please use DeleteNode - // The method will be removed in godo 2.0. - RecycleNodePoolNodes(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolRecycleNodesRequest) (*Response, error) - DeleteNodePool(ctx context.Context, clusterID, poolID string) (*Response, error) - DeleteNode(ctx context.Context, clusterID, poolID, nodeID string, req *KubernetesNodeDeleteRequest) (*Response, error) - - GetOptions(context.Context) (*KubernetesOptions, *Response, error) - AddRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) - RemoveRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) - - RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error) - GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error) -} - -var _ KubernetesService = &KubernetesServiceOp{} - -// KubernetesServiceOp handles communication with Kubernetes methods of the DigitalOcean API. -type KubernetesServiceOp struct { - client *Client -} - -// KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster. 
-type KubernetesClusterCreateRequest struct { - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - VersionSlug string `json:"version,omitempty"` - Tags []string `json:"tags,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` - - // Create cluster with highly available control plane - HA bool `json:"ha"` - - NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"` - - MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"` - AutoUpgrade bool `json:"auto_upgrade"` - SurgeUpgrade bool `json:"surge_upgrade"` - ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` -} - -// KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster. -type KubernetesClusterUpdateRequest struct { - Name string `json:"name,omitempty"` - Tags []string `json:"tags,omitempty"` - MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` - AutoUpgrade *bool `json:"auto_upgrade,omitempty"` - SurgeUpgrade bool `json:"surge_upgrade,omitempty"` - ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` - - // Convert cluster to run highly available control plane - HA *bool `json:"ha,omitempty"` -} - -// KubernetesClusterDeleteSelectiveRequest represents a delete selective request to delete a cluster and it's associated resources. -type KubernetesClusterDeleteSelectiveRequest struct { - Volumes []string `json:"volumes"` - VolumeSnapshots []string `json:"volume_snapshots"` - LoadBalancers []string `json:"load_balancers"` -} - -// KubernetesClusterUpgradeRequest represents a request to upgrade a Kubernetes cluster. -type KubernetesClusterUpgradeRequest struct { - VersionSlug string `json:"version,omitempty"` -} - -// Taint represents a Kubernetes taint that can be associated with a node pool -// (and, transitively, with all nodes of that pool). 
-type Taint struct { - Key string - Value string - Effect string -} - -func (t Taint) String() string { - if t.Value == "" { - return fmt.Sprintf("%s:%s", t.Key, t.Effect) - } - return fmt.Sprintf("%s=%s:%s", t.Key, t.Value, t.Effect) -} - -// KubernetesNodePoolCreateRequest represents a request to create a node pool for a -// Kubernetes cluster. -type KubernetesNodePoolCreateRequest struct { - Name string `json:"name,omitempty"` - Size string `json:"size,omitempty"` - Count int `json:"count,omitempty"` - Tags []string `json:"tags,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Taints []Taint `json:"taints,omitempty"` - AutoScale bool `json:"auto_scale,omitempty"` - MinNodes int `json:"min_nodes,omitempty"` - MaxNodes int `json:"max_nodes,omitempty"` -} - -// KubernetesNodePoolUpdateRequest represents a request to update a node pool in a -// Kubernetes cluster. -type KubernetesNodePoolUpdateRequest struct { - Name string `json:"name,omitempty"` - Count *int `json:"count,omitempty"` - Tags []string `json:"tags,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Taints *[]Taint `json:"taints,omitempty"` - AutoScale *bool `json:"auto_scale,omitempty"` - MinNodes *int `json:"min_nodes,omitempty"` - MaxNodes *int `json:"max_nodes,omitempty"` -} - -// KubernetesNodePoolRecycleNodesRequest is DEPRECATED please use DeleteNode -// The type will be removed in godo 2.0. -type KubernetesNodePoolRecycleNodesRequest struct { - Nodes []string `json:"nodes,omitempty"` -} - -// KubernetesNodeDeleteRequest is a request to delete a specific node in a node pool. -type KubernetesNodeDeleteRequest struct { - // Replace will cause a new node to be created to replace the deleted node. - Replace bool `json:"replace,omitempty"` - - // SkipDrain skips draining the node before deleting it. - SkipDrain bool `json:"skip_drain,omitempty"` -} - -// KubernetesClusterCredentialsGetRequest is a request to get cluster credentials. 
-type KubernetesClusterCredentialsGetRequest struct { - ExpirySeconds *int `json:"expiry_seconds,omitempty"` -} - -// KubernetesClusterRegistryRequest represents clusters to integrate with docr registry -type KubernetesClusterRegistryRequest struct { - ClusterUUIDs []string `json:"cluster_uuids,omitempty"` -} - -type KubernetesRunClusterlintRequest struct { - IncludeGroups []string `json:"include_groups"` - ExcludeGroups []string `json:"exclude_groups"` - IncludeChecks []string `json:"include_checks"` - ExcludeChecks []string `json:"exclude_checks"` -} - -type KubernetesGetClusterlintRequest struct { - RunId string `json:"run_id"` -} - -// KubernetesCluster represents a Kubernetes cluster. -type KubernetesCluster struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - VersionSlug string `json:"version,omitempty"` - ClusterSubnet string `json:"cluster_subnet,omitempty"` - ServiceSubnet string `json:"service_subnet,omitempty"` - IPv4 string `json:"ipv4,omitempty"` - Endpoint string `json:"endpoint,omitempty"` - Tags []string `json:"tags,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` - - // Cluster runs a highly available control plane - HA bool `json:"ha,omitempty"` - - NodePools []*KubernetesNodePool `json:"node_pools,omitempty"` - - MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` - AutoUpgrade bool `json:"auto_upgrade,omitempty"` - SurgeUpgrade bool `json:"surge_upgrade,omitempty"` - RegistryEnabled bool `json:"registry_enabled,omitempty"` - ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` - - Status *KubernetesClusterStatus `json:"status,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` -} - -// URN returns the Kubernetes cluster's ID in the format of DigitalOcean URN. 
-func (kc KubernetesCluster) URN() string { - return ToURN("Kubernetes", kc.ID) -} - -// KubernetesClusterUser represents a Kubernetes cluster user. -type KubernetesClusterUser struct { - Username string `json:"username,omitempty"` - Groups []string `json:"groups,omitempty"` -} - -// KubernetesClusterCredentials represents Kubernetes cluster credentials. -type KubernetesClusterCredentials struct { - Server string `json:"server"` - CertificateAuthorityData []byte `json:"certificate_authority_data"` - ClientCertificateData []byte `json:"client_certificate_data"` - ClientKeyData []byte `json:"client_key_data"` - Token string `json:"token"` - ExpiresAt time.Time `json:"expires_at"` -} - -// KubernetesMaintenancePolicy is a configuration to set the maintenance window -// of a cluster -type KubernetesMaintenancePolicy struct { - StartTime string `json:"start_time"` - Duration string `json:"duration"` - Day KubernetesMaintenancePolicyDay `json:"day"` -} - -// KubernetesControlPlaneFirewall represents Kubernetes cluster control plane firewall. 
-type KubernetesControlPlaneFirewall struct { - Enabled *bool `json:"enabled"` - AllowedAddresses []string `json:"allowed_addresses"` -} - -// KubernetesMaintenancePolicyDay represents the possible days of a maintenance -// window -type KubernetesMaintenancePolicyDay int - -const ( - // KubernetesMaintenanceDayAny sets the KubernetesMaintenancePolicyDay to any - // day of the week - KubernetesMaintenanceDayAny KubernetesMaintenancePolicyDay = iota - - // KubernetesMaintenanceDayMonday sets the KubernetesMaintenancePolicyDay to - // Monday - KubernetesMaintenanceDayMonday - - // KubernetesMaintenanceDayTuesday sets the KubernetesMaintenancePolicyDay to - // Tuesday - KubernetesMaintenanceDayTuesday - - // KubernetesMaintenanceDayWednesday sets the KubernetesMaintenancePolicyDay to - // Wednesday - KubernetesMaintenanceDayWednesday - - // KubernetesMaintenanceDayThursday sets the KubernetesMaintenancePolicyDay to - // Thursday - KubernetesMaintenanceDayThursday - - // KubernetesMaintenanceDayFriday sets the KubernetesMaintenancePolicyDay to - // Friday - KubernetesMaintenanceDayFriday - - // KubernetesMaintenanceDaySaturday sets the KubernetesMaintenancePolicyDay to - // Saturday - KubernetesMaintenanceDaySaturday - - // KubernetesMaintenanceDaySunday sets the KubernetesMaintenancePolicyDay to - // Sunday - KubernetesMaintenanceDaySunday -) - -var ( - days = [...]string{ - "any", - "monday", - "tuesday", - "wednesday", - "thursday", - "friday", - "saturday", - "sunday", - } - - toDay = map[string]KubernetesMaintenancePolicyDay{ - "any": KubernetesMaintenanceDayAny, - "monday": KubernetesMaintenanceDayMonday, - "tuesday": KubernetesMaintenanceDayTuesday, - "wednesday": KubernetesMaintenanceDayWednesday, - "thursday": KubernetesMaintenanceDayThursday, - "friday": KubernetesMaintenanceDayFriday, - "saturday": KubernetesMaintenanceDaySaturday, - "sunday": KubernetesMaintenanceDaySunday, - } -) - -// KubernetesMaintenanceToDay returns the appropriate 
KubernetesMaintenancePolicyDay for the given string. -func KubernetesMaintenanceToDay(day string) (KubernetesMaintenancePolicyDay, error) { - d, ok := toDay[day] - if !ok { - return 0, fmt.Errorf("unknown day: %q", day) - } - - return d, nil -} - -func (k KubernetesMaintenancePolicyDay) String() string { - if KubernetesMaintenanceDayAny <= k && k <= KubernetesMaintenanceDaySunday { - return days[k] - } - return fmt.Sprintf("%d !Weekday", k) - -} - -// UnmarshalJSON parses the JSON string into KubernetesMaintenancePolicyDay -func (k *KubernetesMaintenancePolicyDay) UnmarshalJSON(data []byte) error { - var val string - if err := json.Unmarshal(data, &val); err != nil { - return err - } - - parsed, err := KubernetesMaintenanceToDay(val) - if err != nil { - return err - } - *k = parsed - return nil -} - -// MarshalJSON returns the JSON string for KubernetesMaintenancePolicyDay -func (k KubernetesMaintenancePolicyDay) MarshalJSON() ([]byte, error) { - if KubernetesMaintenanceDayAny <= k && k <= KubernetesMaintenanceDaySunday { - return json.Marshal(days[k]) - } - - return nil, fmt.Errorf("invalid day: %d", k) -} - -// Possible states for a cluster. -const ( - KubernetesClusterStatusProvisioning = KubernetesClusterStatusState("provisioning") - KubernetesClusterStatusRunning = KubernetesClusterStatusState("running") - KubernetesClusterStatusDegraded = KubernetesClusterStatusState("degraded") - KubernetesClusterStatusError = KubernetesClusterStatusState("error") - KubernetesClusterStatusDeleted = KubernetesClusterStatusState("deleted") - KubernetesClusterStatusUpgrading = KubernetesClusterStatusState("upgrading") - KubernetesClusterStatusInvalid = KubernetesClusterStatusState("invalid") -) - -// KubernetesClusterStatusState represents states for a cluster. -type KubernetesClusterStatusState string - -var _ encoding.TextUnmarshaler = (*KubernetesClusterStatusState)(nil) - -// UnmarshalText unmarshals the state. 
-func (s *KubernetesClusterStatusState) UnmarshalText(text []byte) error { - switch KubernetesClusterStatusState(strings.ToLower(string(text))) { - case KubernetesClusterStatusProvisioning: - *s = KubernetesClusterStatusProvisioning - case KubernetesClusterStatusRunning: - *s = KubernetesClusterStatusRunning - case KubernetesClusterStatusDegraded: - *s = KubernetesClusterStatusDegraded - case KubernetesClusterStatusError: - *s = KubernetesClusterStatusError - case KubernetesClusterStatusDeleted: - *s = KubernetesClusterStatusDeleted - case KubernetesClusterStatusUpgrading: - *s = KubernetesClusterStatusUpgrading - case "", KubernetesClusterStatusInvalid: - *s = KubernetesClusterStatusInvalid - default: - return fmt.Errorf("unknown cluster state %q", string(text)) - } - return nil -} - -// KubernetesClusterStatus describes the status of a cluster. -type KubernetesClusterStatus struct { - State KubernetesClusterStatusState `json:"state,omitempty"` - Message string `json:"message,omitempty"` -} - -// KubernetesNodePool represents a node pool in a Kubernetes cluster. -type KubernetesNodePool struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Size string `json:"size,omitempty"` - Count int `json:"count,omitempty"` - Tags []string `json:"tags,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Taints []Taint `json:"taints,omitempty"` - AutoScale bool `json:"auto_scale,omitempty"` - MinNodes int `json:"min_nodes,omitempty"` - MaxNodes int `json:"max_nodes,omitempty"` - - Nodes []*KubernetesNode `json:"nodes,omitempty"` -} - -// KubernetesNode represents a Node in a node pool in a Kubernetes cluster. 
-type KubernetesNode struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Status *KubernetesNodeStatus `json:"status,omitempty"` - DropletID string `json:"droplet_id,omitempty"` - - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` -} - -// KubernetesNodeStatus represents the status of a particular Node in a Kubernetes cluster. -type KubernetesNodeStatus struct { - State string `json:"state,omitempty"` - Message string `json:"message,omitempty"` -} - -// KubernetesOptions represents options available for creating Kubernetes clusters. -type KubernetesOptions struct { - Versions []*KubernetesVersion `json:"versions,omitempty"` - Regions []*KubernetesRegion `json:"regions,omitempty"` - Sizes []*KubernetesNodeSize `json:"sizes,omitempty"` -} - -// KubernetesVersion is a DigitalOcean Kubernetes release. -type KubernetesVersion struct { - Slug string `json:"slug,omitempty"` - KubernetesVersion string `json:"kubernetes_version,omitempty"` - SupportedFeatures []string `json:"supported_features,omitempty"` -} - -// KubernetesNodeSize is a node sizes supported for Kubernetes clusters. -type KubernetesNodeSize struct { - Name string `json:"name"` - Slug string `json:"slug"` -} - -// KubernetesRegion is a region usable by Kubernetes clusters. -type KubernetesRegion struct { - Name string `json:"name"` - Slug string `json:"slug"` -} - -// ClusterlintDiagnostic is a diagnostic returned from clusterlint. -type ClusterlintDiagnostic struct { - CheckName string `json:"check_name"` - Severity string `json:"severity"` - Message string `json:"message"` - Object *ClusterlintObject `json:"object"` -} - -// ClusterlintObject is the object a clusterlint diagnostic refers to. 
-type ClusterlintObject struct { - Kind string `json:"kind"` - Name string `json:"name"` - Namespace string `json:"namespace"` - Owners []*ClusterlintOwner `json:"owners,omitempty"` -} - -// ClusterlintOwner indicates the resource that owns the offending object. -type ClusterlintOwner struct { - Kind string `json:"kind"` - Name string `json:"name"` -} - -// KubernetesAssociatedResources represents a cluster's associated resources -type KubernetesAssociatedResources struct { - Volumes []*AssociatedResource `json:"volumes"` - VolumeSnapshots []*AssociatedResource `json:"volume_snapshots"` - LoadBalancers []*AssociatedResource `json:"load_balancers"` -} - -// AssociatedResource is the object to represent a Kubernetes cluster associated resource's ID and Name. -type AssociatedResource struct { - ID string `json:"id"` - Name string `json:"name"` -} - -type kubernetesClustersRoot struct { - Clusters []*KubernetesCluster `json:"kubernetes_clusters,omitempty"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -type kubernetesClusterRoot struct { - Cluster *KubernetesCluster `json:"kubernetes_cluster,omitempty"` -} - -type kubernetesClusterUserRoot struct { - User *KubernetesClusterUser `json:"kubernetes_cluster_user,omitempty"` -} - -type kubernetesNodePoolRoot struct { - NodePool *KubernetesNodePool `json:"node_pool,omitempty"` -} - -type kubernetesNodePoolsRoot struct { - NodePools []*KubernetesNodePool `json:"node_pools,omitempty"` - Links *Links `json:"links,omitempty"` -} - -type kubernetesUpgradesRoot struct { - AvailableUpgradeVersions []*KubernetesVersion `json:"available_upgrade_versions,omitempty"` -} - -// Get retrieves the details of a Kubernetes cluster. 
-func (svc *KubernetesServiceOp) Get(ctx context.Context, clusterID string) (*KubernetesCluster, *Response, error) { - path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesClusterRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Cluster, resp, nil -} - -// GetUser retrieves the details of a Kubernetes cluster user. -func (svc *KubernetesServiceOp) GetUser(ctx context.Context, clusterID string) (*KubernetesClusterUser, *Response, error) { - path := fmt.Sprintf("%s/%s/user", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesClusterUserRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.User, resp, nil -} - -// GetUpgrades retrieves versions a Kubernetes cluster can be upgraded to. An -// upgrade can be requested using `Upgrade`. -func (svc *KubernetesServiceOp) GetUpgrades(ctx context.Context, clusterID string) ([]*KubernetesVersion, *Response, error) { - path := fmt.Sprintf("%s/%s/upgrades", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesUpgradesRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, nil, err - } - return root.AvailableUpgradeVersions, resp, nil -} - -// Create creates a Kubernetes cluster. 
-func (svc *KubernetesServiceOp) Create(ctx context.Context, create *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error) { - path := kubernetesClustersPath - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create) - if err != nil { - return nil, nil, err - } - root := new(kubernetesClusterRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Cluster, resp, nil -} - -// Delete deletes a Kubernetes cluster. There is no way to recover a cluster -// once it has been destroyed. -func (svc *KubernetesServiceOp) Delete(ctx context.Context, clusterID string) (*Response, error) { - path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// DeleteSelective deletes a Kubernetes cluster and the specified associated resources. -// Users can choose to delete specific volumes, volume snapshots or load balancers along with the cluster -// There is no way to recover a cluster or the specified resources once destroyed. -func (svc *KubernetesServiceOp) DeleteSelective(ctx context.Context, clusterID string, request *KubernetesClusterDeleteSelectiveRequest) (*Response, error) { - path := fmt.Sprintf("%s/%s/destroy_with_associated_resources/selective", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, request) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// DeleteDangerous deletes a Kubernetes cluster and all its associated resources. There is no way to recover a cluster -// or it's associated resources once destroyed. 
-func (svc *KubernetesServiceOp) DeleteDangerous(ctx context.Context, clusterID string) (*Response, error) { - path := fmt.Sprintf("%s/%s/destroy_with_associated_resources/dangerous", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// ListAssociatedResourcesForDeletion lists a Kubernetes cluster's resources that can be selected -// for deletion along with the cluster. See DeleteSelective -// Associated resources include volumes, volume snapshots and load balancers. -func (svc *KubernetesServiceOp) ListAssociatedResourcesForDeletion(ctx context.Context, clusterID string) (*KubernetesAssociatedResources, *Response, error) { - path := fmt.Sprintf("%s/%s/destroy_with_associated_resources", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(KubernetesAssociatedResources) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root, resp, nil -} - -// List returns a list of the Kubernetes clusters visible with the caller's API token. 
-func (svc *KubernetesServiceOp) List(ctx context.Context, opts *ListOptions) ([]*KubernetesCluster, *Response, error) { - path := kubernetesClustersPath - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesClustersRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Clusters, resp, nil -} - -// KubernetesClusterConfig is the content of a Kubernetes config file, which can be -// used to interact with your Kubernetes cluster using `kubectl`. -// See: https://kubernetes.io/docs/tasks/tools/install-kubectl/ -type KubernetesClusterConfig struct { - KubeconfigYAML []byte -} - -// GetKubeConfig returns a Kubernetes config file for the specified cluster. -func (svc *KubernetesServiceOp) GetKubeConfig(ctx context.Context, clusterID string) (*KubernetesClusterConfig, *Response, error) { - path := fmt.Sprintf("%s/%s/kubeconfig", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - configBytes := bytes.NewBuffer(nil) - resp, err := svc.client.Do(ctx, req, configBytes) - if err != nil { - return nil, resp, err - } - res := &KubernetesClusterConfig{ - KubeconfigYAML: configBytes.Bytes(), - } - return res, resp, nil -} - -// GetKubeConfigWithExpiry returns a Kubernetes config file for the specified cluster with expiry_seconds. 
-func (svc *KubernetesServiceOp) GetKubeConfigWithExpiry(ctx context.Context, clusterID string, expirySeconds int64) (*KubernetesClusterConfig, *Response, error) { - path := fmt.Sprintf("%s/%s/kubeconfig", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - q := req.URL.Query() - q.Add("expiry_seconds", fmt.Sprintf("%d", expirySeconds)) - req.URL.RawQuery = q.Encode() - configBytes := bytes.NewBuffer(nil) - resp, err := svc.client.Do(ctx, req, configBytes) - if err != nil { - return nil, resp, err - } - res := &KubernetesClusterConfig{ - KubeconfigYAML: configBytes.Bytes(), - } - return res, resp, nil -} - -// GetCredentials returns a Kubernetes API server credentials for the specified cluster. -func (svc *KubernetesServiceOp) GetCredentials(ctx context.Context, clusterID string, get *KubernetesClusterCredentialsGetRequest) (*KubernetesClusterCredentials, *Response, error) { - path := fmt.Sprintf("%s/%s/credentials", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - q := req.URL.Query() - if get.ExpirySeconds != nil { - q.Add("expiry_seconds", strconv.Itoa(*get.ExpirySeconds)) - } - req.URL.RawQuery = q.Encode() - credentials := new(KubernetesClusterCredentials) - resp, err := svc.client.Do(ctx, req, credentials) - if err != nil { - return nil, nil, err - } - return credentials, resp, nil -} - -// Update updates a Kubernetes cluster's properties. 
-func (svc *KubernetesServiceOp) Update(ctx context.Context, clusterID string, update *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error) { - path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, update) - if err != nil { - return nil, nil, err - } - root := new(kubernetesClusterRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Cluster, resp, nil -} - -// Upgrade upgrades a Kubernetes cluster to a new version. Valid upgrade -// versions for a given cluster can be retrieved with `GetUpgrades`. -func (svc *KubernetesServiceOp) Upgrade(ctx context.Context, clusterID string, upgrade *KubernetesClusterUpgradeRequest) (*Response, error) { - path := fmt.Sprintf("%s/%s/upgrade", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, upgrade) - if err != nil { - return nil, err - } - return svc.client.Do(ctx, req, nil) -} - -// CreateNodePool creates a new node pool in an existing Kubernetes cluster. -func (svc *KubernetesServiceOp) CreateNodePool(ctx context.Context, clusterID string, create *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error) { - path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create) - if err != nil { - return nil, nil, err - } - root := new(kubernetesNodePoolRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.NodePool, resp, nil -} - -// GetNodePool retrieves an existing node pool in a Kubernetes cluster. 
-func (svc *KubernetesServiceOp) GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error) { - path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesNodePoolRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.NodePool, resp, nil -} - -// ListNodePools lists all the node pools found in a Kubernetes cluster. -func (svc *KubernetesServiceOp) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) { - path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesNodePoolsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.NodePools, resp, nil -} - -// UpdateNodePool updates the details of an existing node pool. -func (svc *KubernetesServiceOp) UpdateNodePool(ctx context.Context, clusterID, poolID string, update *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error) { - path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, update) - if err != nil { - return nil, nil, err - } - root := new(kubernetesNodePoolRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.NodePool, resp, nil -} - -// RecycleNodePoolNodes is DEPRECATED please use DeleteNode -// The method will be removed in godo 2.0. 
-func (svc *KubernetesServiceOp) RecycleNodePoolNodes(ctx context.Context, clusterID, poolID string, recycle *KubernetesNodePoolRecycleNodesRequest) (*Response, error) { - path := fmt.Sprintf("%s/%s/node_pools/%s/recycle", kubernetesClustersPath, clusterID, poolID) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, recycle) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// DeleteNodePool deletes a node pool, and subsequently all the nodes in that pool. -func (svc *KubernetesServiceOp) DeleteNodePool(ctx context.Context, clusterID, poolID string) (*Response, error) { - path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// DeleteNode deletes a specific node in a node pool. -func (svc *KubernetesServiceOp) DeleteNode(ctx context.Context, clusterID, poolID, nodeID string, deleteReq *KubernetesNodeDeleteRequest) (*Response, error) { - path := fmt.Sprintf("%s/%s/node_pools/%s/nodes/%s", kubernetesClustersPath, clusterID, poolID, nodeID) - if deleteReq != nil { - v := make(url.Values) - if deleteReq.SkipDrain { - v.Set("skip_drain", "1") - } - if deleteReq.Replace { - v.Set("replace", "1") - } - if query := v.Encode(); query != "" { - path = path + "?" 
+ query - } - } - - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -type kubernetesOptionsRoot struct { - Options *KubernetesOptions `json:"options,omitempty"` - Links *Links `json:"links,omitempty"` -} - -// GetOptions returns options about the Kubernetes service, such as the versions available for -// cluster creation. -func (svc *KubernetesServiceOp) GetOptions(ctx context.Context) (*KubernetesOptions, *Response, error) { - path := kubernetesOptionsPath - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(kubernetesOptionsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Options, resp, nil -} - -// AddRegistry integrates docr registry with all the specified clusters -func (svc *KubernetesServiceOp) AddRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) { - path := fmt.Sprintf("%s/registry", kubernetesBasePath) - request, err := svc.client.NewRequest(ctx, http.MethodPost, path, req) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, request, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// RemoveRegistry removes docr registry support for all the specified clusters -func (svc *KubernetesServiceOp) RemoveRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) { - path := fmt.Sprintf("%s/registry", kubernetesBasePath) - request, err := svc.client.NewRequest(ctx, http.MethodDelete, path, req) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, request, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -type runClusterlintRoot struct { - RunID string `json:"run_id"` -} - -// RunClusterlint schedules a clusterlint 
run for the specified cluster -func (svc *KubernetesServiceOp) RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error) { - path := fmt.Sprintf("%s/%s/clusterlint", kubernetesClustersPath, clusterID) - request, err := svc.client.NewRequest(ctx, http.MethodPost, path, req) - if err != nil { - return "", nil, err - } - root := new(runClusterlintRoot) - resp, err := svc.client.Do(ctx, request, root) - if err != nil { - return "", resp, err - } - return root.RunID, resp, nil -} - -type clusterlintDiagnosticsRoot struct { - Diagnostics []*ClusterlintDiagnostic -} - -// GetClusterlintResults fetches the diagnostics after clusterlint run completes -func (svc *KubernetesServiceOp) GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error) { - path := fmt.Sprintf("%s/%s/clusterlint", kubernetesClustersPath, clusterID) - if req != nil { - v := make(url.Values) - if req.RunId != "" { - v.Set("run_id", req.RunId) - } - if query := v.Encode(); query != "" { - path = path + "?" 
+ query - } - } - - request, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(clusterlintDiagnosticsRoot) - resp, err := svc.client.Do(ctx, request, root) - if err != nil { - return nil, resp, err - } - return root.Diagnostics, resp, nil -} diff --git a/vendor/github.com/digitalocean/godo/links.go b/vendor/github.com/digitalocean/godo/links.go deleted file mode 100644 index 4b5db97..0000000 --- a/vendor/github.com/digitalocean/godo/links.go +++ /dev/null @@ -1,123 +0,0 @@ -package godo - -import ( - "context" - "net/url" - "strconv" -) - -// Links manages links that are returned along with a List -type Links struct { - Pages *Pages `json:"pages,omitempty"` - Actions []LinkAction `json:"actions,omitempty"` -} - -// Pages are pages specified in Links -type Pages struct { - First string `json:"first,omitempty"` - Prev string `json:"prev,omitempty"` - Last string `json:"last,omitempty"` - Next string `json:"next,omitempty"` -} - -// LinkAction is a pointer to an action -type LinkAction struct { - ID int `json:"id,omitempty"` - Rel string `json:"rel,omitempty"` - HREF string `json:"href,omitempty"` -} - -// CurrentPage is current page of the list -func (l *Links) CurrentPage() (int, error) { - return l.Pages.current() -} - -// NextPageToken is the page token to request the next page of the list -func (l *Links) NextPageToken() (string, error) { - return l.Pages.nextPageToken() -} - -// PrevPageToken is the page token to request the previous page of the list -func (l *Links) PrevPageToken() (string, error) { - return l.Pages.prevPageToken() -} - -func (p *Pages) current() (int, error) { - switch { - case p == nil: - return 1, nil - case p.Prev == "" && p.Next != "": - return 1, nil - case p.Prev != "": - prevPage, err := pageForURL(p.Prev) - if err != nil { - return 0, err - } - - return prevPage + 1, nil - } - - return 0, nil -} - -func (p *Pages) nextPageToken() (string, error) { - if p == nil || 
p.Next == "" { - return "", nil - } - token, err := pageTokenFromURL(p.Next) - if err != nil { - return "", err - } - return token, nil -} - -func (p *Pages) prevPageToken() (string, error) { - if p == nil || p.Prev == "" { - return "", nil - } - token, err := pageTokenFromURL(p.Prev) - if err != nil { - return "", err - } - return token, nil -} - -// IsLastPage returns true if the current page is the last -func (l *Links) IsLastPage() bool { - if l.Pages == nil { - return true - } - return l.Pages.isLast() -} - -func (p *Pages) isLast() bool { - return p.Next == "" -} - -func pageForURL(urlText string) (int, error) { - u, err := url.ParseRequestURI(urlText) - if err != nil { - return 0, err - } - - pageStr := u.Query().Get("page") - page, err := strconv.Atoi(pageStr) - if err != nil { - return 0, err - } - - return page, nil -} - -func pageTokenFromURL(urlText string) (string, error) { - u, err := url.ParseRequestURI(urlText) - if err != nil { - return "", err - } - return u.Query().Get("page_token"), nil -} - -// Get a link action by id. 
-func (la *LinkAction) Get(ctx context.Context, client *Client) (*Action, *Response, error) { - return client.Actions.Get(ctx, la.ID) -} diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go deleted file mode 100644 index a24952b..0000000 --- a/vendor/github.com/digitalocean/godo/load_balancers.go +++ /dev/null @@ -1,502 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const ( - cachePath = "cache" - dropletsPath = "droplets" - forwardingRulesPath = "forwarding_rules" - loadBalancersBasePath = "/v2/load_balancers" -) - -const ( - // Load Balancer types - LoadBalancerTypeGlobal = "GLOBAL" - LoadBalancerTypeRegional = "REGIONAL" - LoadBalancerTypeRegionalNetwork = "REGIONAL_NETWORK" - - // Load Balancer network types - LoadBalancerNetworkTypeExternal = "EXTERNAL" - LoadBalancerNetworkTypeInternal = "INTERNAL" -) - -// LoadBalancersService is an interface for managing load balancers with the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Load-Balancers -type LoadBalancersService interface { - Get(context.Context, string) (*LoadBalancer, *Response, error) - List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error) - Create(context.Context, *LoadBalancerRequest) (*LoadBalancer, *Response, error) - Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) - Delete(ctx context.Context, lbID string) (*Response, error) - AddDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) - RemoveDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) - AddForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) - RemoveForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) - PurgeCache(ctx context.Context, lbID string) (*Response, error) -} - -// LoadBalancer represents a DigitalOcean load balancer configuration. -// Tags can only be provided upon the creation of a Load Balancer. -type LoadBalancer struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - IP string `json:"ip,omitempty"` - // SizeSlug is mutually exclusive with SizeUnit. Only one should be specified - SizeSlug string `json:"size,omitempty"` - // SizeUnit is mutually exclusive with SizeSlug. 
Only one should be specified - SizeUnit uint32 `json:"size_unit,omitempty"` - Type string `json:"type,omitempty"` - Algorithm string `json:"algorithm,omitempty"` - Status string `json:"status,omitempty"` - Created string `json:"created_at,omitempty"` - ForwardingRules []ForwardingRule `json:"forwarding_rules,omitempty"` - HealthCheck *HealthCheck `json:"health_check,omitempty"` - StickySessions *StickySessions `json:"sticky_sessions,omitempty"` - Region *Region `json:"region,omitempty"` - DropletIDs []int `json:"droplet_ids,omitempty"` - Tag string `json:"tag,omitempty"` - Tags []string `json:"tags,omitempty"` - RedirectHttpToHttps bool `json:"redirect_http_to_https,omitempty"` - EnableProxyProtocol bool `json:"enable_proxy_protocol,omitempty"` - EnableBackendKeepalive bool `json:"enable_backend_keepalive,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` - DisableLetsEncryptDNSRecords *bool `json:"disable_lets_encrypt_dns_records,omitempty"` - ValidateOnly bool `json:"validate_only,omitempty"` - ProjectID string `json:"project_id,omitempty"` - HTTPIdleTimeoutSeconds *uint64 `json:"http_idle_timeout_seconds,omitempty"` - Firewall *LBFirewall `json:"firewall,omitempty"` - Domains []*LBDomain `json:"domains,omitempty"` - GLBSettings *GLBSettings `json:"glb_settings,omitempty"` - TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"` - Network string `json:"network,omitempty"` -} - -// String creates a human-readable description of a LoadBalancer. -func (l LoadBalancer) String() string { - return Stringify(l) -} - -// URN returns the load balancer ID in a valid DO API URN form. -func (l LoadBalancer) URN() string { - return ToURN("LoadBalancer", l.ID) -} - -// AsRequest creates a LoadBalancerRequest that can be submitted to Update with the current values of the LoadBalancer. -// Modifying the returned LoadBalancerRequest will not modify the original LoadBalancer. 
-func (l LoadBalancer) AsRequest() *LoadBalancerRequest { - r := LoadBalancerRequest{ - Name: l.Name, - Algorithm: l.Algorithm, - SizeSlug: l.SizeSlug, - SizeUnit: l.SizeUnit, - Type: l.Type, - ForwardingRules: append([]ForwardingRule(nil), l.ForwardingRules...), - DropletIDs: append([]int(nil), l.DropletIDs...), - Tag: l.Tag, - RedirectHttpToHttps: l.RedirectHttpToHttps, - EnableProxyProtocol: l.EnableProxyProtocol, - EnableBackendKeepalive: l.EnableBackendKeepalive, - VPCUUID: l.VPCUUID, - DisableLetsEncryptDNSRecords: l.DisableLetsEncryptDNSRecords, - ValidateOnly: l.ValidateOnly, - ProjectID: l.ProjectID, - HTTPIdleTimeoutSeconds: l.HTTPIdleTimeoutSeconds, - TargetLoadBalancerIDs: append([]string(nil), l.TargetLoadBalancerIDs...), - Network: l.Network, - } - - if l.DisableLetsEncryptDNSRecords != nil { - *r.DisableLetsEncryptDNSRecords = *l.DisableLetsEncryptDNSRecords - } - - if l.HealthCheck != nil { - r.HealthCheck = &HealthCheck{} - *r.HealthCheck = *l.HealthCheck - } - - if l.StickySessions != nil { - r.StickySessions = &StickySessions{} - *r.StickySessions = *l.StickySessions - } - - if l.Region != nil { - r.Region = l.Region.Slug - } - - if l.Firewall != nil { - r.Firewall = l.Firewall.deepCopy() - } - - for _, domain := range l.Domains { - lbDomain := &LBDomain{} - *lbDomain = *domain - lbDomain.VerificationErrorReasons = append([]string(nil), domain.VerificationErrorReasons...) - lbDomain.SSLValidationErrorReasons = append([]string(nil), domain.SSLValidationErrorReasons...) - r.Domains = append(r.Domains, lbDomain) - } - - if l.GLBSettings != nil { - r.GLBSettings = l.GLBSettings.deepCopy() - } - - return &r -} - -// ForwardingRule represents load balancer forwarding rules. 
-type ForwardingRule struct { - EntryProtocol string `json:"entry_protocol,omitempty"` - EntryPort int `json:"entry_port,omitempty"` - TargetProtocol string `json:"target_protocol,omitempty"` - TargetPort int `json:"target_port,omitempty"` - CertificateID string `json:"certificate_id,omitempty"` - TlsPassthrough bool `json:"tls_passthrough,omitempty"` -} - -// String creates a human-readable description of a ForwardingRule. -func (f ForwardingRule) String() string { - return Stringify(f) -} - -// HealthCheck represents optional load balancer health check rules. -type HealthCheck struct { - Protocol string `json:"protocol,omitempty"` - Port int `json:"port,omitempty"` - Path string `json:"path,omitempty"` - CheckIntervalSeconds int `json:"check_interval_seconds,omitempty"` - ResponseTimeoutSeconds int `json:"response_timeout_seconds,omitempty"` - HealthyThreshold int `json:"healthy_threshold,omitempty"` - UnhealthyThreshold int `json:"unhealthy_threshold,omitempty"` - ProxyProtocol *bool `json:"proxy_protocol,omitempty"` -} - -// String creates a human-readable description of a HealthCheck. -func (h HealthCheck) String() string { - return Stringify(h) -} - -// StickySessions represents optional load balancer session affinity rules. -type StickySessions struct { - Type string `json:"type,omitempty"` - CookieName string `json:"cookie_name,omitempty"` - CookieTtlSeconds int `json:"cookie_ttl_seconds,omitempty"` -} - -// String creates a human-readable description of a StickySessions instance. -func (s StickySessions) String() string { - return Stringify(s) -} - -// LBFirewall holds the allow and deny rules for a loadbalancer's firewall. -// Currently, allow and deny rules support cidrs and ips. -// Please use the helper methods (IPSourceFirewall/CIDRSourceFirewall) to format the allow/deny rules. 
-type LBFirewall struct { - Allow []string `json:"allow,omitempty"` - Deny []string `json:"deny,omitempty"` -} - -func (lbf *LBFirewall) deepCopy() *LBFirewall { - return &LBFirewall{ - Allow: append([]string(nil), lbf.Allow...), - Deny: append([]string(nil), lbf.Deny...), - } -} - -// IPSourceFirewall takes an IP (string) and returns a formatted ip source firewall rule -func IPSourceFirewall(ip string) string { return fmt.Sprintf("ip:%s", ip) } - -// CIDRSourceFirewall takes a CIDR notation IP address and prefix length string -// like "192.0.2.0/24" and returns a formatted cidr source firewall rule -func CIDRSourceFirewall(cidr string) string { return fmt.Sprintf("cidr:%s", cidr) } - -// String creates a human-readable description of an LBFirewall instance. -func (f LBFirewall) String() string { - return Stringify(f) -} - -// LoadBalancerRequest represents the configuration to be applied to an existing or a new load balancer. -type LoadBalancerRequest struct { - Name string `json:"name,omitempty"` - Algorithm string `json:"algorithm,omitempty"` - Region string `json:"region,omitempty"` - // SizeSlug is mutually exclusive with SizeUnit. Only one should be specified - SizeSlug string `json:"size,omitempty"` - // SizeUnit is mutually exclusive with SizeSlug. 
Only one should be specified - SizeUnit uint32 `json:"size_unit,omitempty"` - Type string `json:"type,omitempty"` - ForwardingRules []ForwardingRule `json:"forwarding_rules,omitempty"` - HealthCheck *HealthCheck `json:"health_check,omitempty"` - StickySessions *StickySessions `json:"sticky_sessions,omitempty"` - DropletIDs []int `json:"droplet_ids,omitempty"` - Tag string `json:"tag,omitempty"` - Tags []string `json:"tags,omitempty"` - RedirectHttpToHttps bool `json:"redirect_http_to_https,omitempty"` - EnableProxyProtocol bool `json:"enable_proxy_protocol,omitempty"` - EnableBackendKeepalive bool `json:"enable_backend_keepalive,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` - DisableLetsEncryptDNSRecords *bool `json:"disable_lets_encrypt_dns_records,omitempty"` - ValidateOnly bool `json:"validate_only,omitempty"` - ProjectID string `json:"project_id,omitempty"` - HTTPIdleTimeoutSeconds *uint64 `json:"http_idle_timeout_seconds,omitempty"` - Firewall *LBFirewall `json:"firewall,omitempty"` - Domains []*LBDomain `json:"domains,omitempty"` - GLBSettings *GLBSettings `json:"glb_settings,omitempty"` - TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"` - Network string `json:"network,omitempty"` -} - -// String creates a human-readable description of a LoadBalancerRequest. 
-func (l LoadBalancerRequest) String() string { - return Stringify(l) -} - -type forwardingRulesRequest struct { - Rules []ForwardingRule `json:"forwarding_rules,omitempty"` -} - -func (l forwardingRulesRequest) String() string { - return Stringify(l) -} - -type dropletIDsRequest struct { - IDs []int `json:"droplet_ids,omitempty"` -} - -func (l dropletIDsRequest) String() string { - return Stringify(l) -} - -// LBDomain defines domain names required to ingress traffic to a Global LB -type LBDomain struct { - // Name defines the domain fqdn - Name string `json:"name"` - // IsManaged indicates if the domain is DO-managed - IsManaged bool `json:"is_managed"` - // CertificateID indicates ID of a TLS certificate - CertificateID string `json:"certificate_id,omitempty"` - // Status indicates the domain validation status - Status string `json:"status,omitempty"` - // VerificationErrorReasons indicates any domain verification errors - VerificationErrorReasons []string `json:"verification_error_reasons,omitempty"` - // SSLValidationErrorReasons indicates any domain SSL validation errors - SSLValidationErrorReasons []string `json:"ssl_validation_error_reasons,omitempty"` -} - -// String creates a human-readable description of a LBDomain -func (d LBDomain) String() string { - return Stringify(d) -} - -// GLBSettings define settings for configuring a Global LB -type GLBSettings struct { - // TargetProtocol is the outgoing traffic protocol. - TargetProtocol string `json:"target_protocol"` - // EntryPort is the outgoing traffic port. 
- TargetPort uint32 `json:"target_port"` - // CDNSettings is the CDN configurations - CDN *CDNSettings `json:"cdn"` - // RegionPriorities embeds regional priority information for regional active-passive failover policy - RegionPriorities map[string]uint32 `json:"region_priorities,omitempty"` - // FailoverThreshold embeds failover threshold percentage for regional active-passive failover policy - FailoverThreshold uint32 `json:"failover_threshold,omitempty"` -} - -// String creates a human-readable description of a GLBSettings -func (s GLBSettings) String() string { - return Stringify(s) -} - -func (s GLBSettings) deepCopy() *GLBSettings { - settings := &GLBSettings{ - TargetProtocol: s.TargetProtocol, - TargetPort: s.TargetPort, - RegionPriorities: s.RegionPriorities, - FailoverThreshold: s.FailoverThreshold, - } - if s.CDN != nil { - settings.CDN = &CDNSettings{IsEnabled: s.CDN.IsEnabled} - } - return settings -} - -// CDNSettings define CDN settings for a Global LB -type CDNSettings struct { - // IsEnabled is the caching enabled flag - IsEnabled bool `json:"is_enabled"` -} - -// String creates a human-readable description of a CDNSettings -func (c CDNSettings) String() string { - return Stringify(c) -} - -type loadBalancersRoot struct { - LoadBalancers []LoadBalancer `json:"load_balancers"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type loadBalancerRoot struct { - LoadBalancer *LoadBalancer `json:"load_balancer"` -} - -// LoadBalancersServiceOp handles communication with load balancer-related methods of the DigitalOcean API. -type LoadBalancersServiceOp struct { - client *Client -} - -var _ LoadBalancersService = &LoadBalancersServiceOp{} - -// Get an existing load balancer by its identifier. 
-func (l *LoadBalancersServiceOp) Get(ctx context.Context, lbID string) (*LoadBalancer, *Response, error) { - path := fmt.Sprintf("%s/%s", loadBalancersBasePath, lbID) - - req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(loadBalancerRoot) - resp, err := l.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.LoadBalancer, resp, err -} - -// List load balancers, with optional pagination. -func (l *LoadBalancersServiceOp) List(ctx context.Context, opt *ListOptions) ([]LoadBalancer, *Response, error) { - path, err := addOptions(loadBalancersBasePath, opt) - if err != nil { - return nil, nil, err - } - - req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(loadBalancersRoot) - resp, err := l.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.LoadBalancers, resp, err -} - -// Create a new load balancer with a given configuration. -func (l *LoadBalancersServiceOp) Create(ctx context.Context, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) { - req, err := l.client.NewRequest(ctx, http.MethodPost, loadBalancersBasePath, lbr) - if err != nil { - return nil, nil, err - } - - root := new(loadBalancerRoot) - resp, err := l.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.LoadBalancer, resp, err -} - -// Update an existing load balancer with new configuration. 
-func (l *LoadBalancersServiceOp) Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) { - path := fmt.Sprintf("%s/%s", loadBalancersBasePath, lbID) - - req, err := l.client.NewRequest(ctx, "PUT", path, lbr) - if err != nil { - return nil, nil, err - } - - root := new(loadBalancerRoot) - resp, err := l.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.LoadBalancer, resp, err -} - -// Delete a load balancer by its identifier. -func (l *LoadBalancersServiceOp) Delete(ctx context.Context, ldID string) (*Response, error) { - path := fmt.Sprintf("%s/%s", loadBalancersBasePath, ldID) - - req, err := l.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - return l.client.Do(ctx, req, nil) -} - -// AddDroplets adds droplets to a load balancer. -func (l *LoadBalancersServiceOp) AddDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) { - path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, dropletsPath) - - req, err := l.client.NewRequest(ctx, http.MethodPost, path, &dropletIDsRequest{IDs: dropletIDs}) - if err != nil { - return nil, err - } - - return l.client.Do(ctx, req, nil) -} - -// RemoveDroplets removes droplets from a load balancer. -func (l *LoadBalancersServiceOp) RemoveDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) { - path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, dropletsPath) - - req, err := l.client.NewRequest(ctx, http.MethodDelete, path, &dropletIDsRequest{IDs: dropletIDs}) - if err != nil { - return nil, err - } - - return l.client.Do(ctx, req, nil) -} - -// AddForwardingRules adds forwarding rules to a load balancer. 
-func (l *LoadBalancersServiceOp) AddForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) { - path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, forwardingRulesPath) - - req, err := l.client.NewRequest(ctx, http.MethodPost, path, &forwardingRulesRequest{Rules: rules}) - if err != nil { - return nil, err - } - - return l.client.Do(ctx, req, nil) -} - -// RemoveForwardingRules removes forwarding rules from a load balancer. -func (l *LoadBalancersServiceOp) RemoveForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) { - path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, forwardingRulesPath) - - req, err := l.client.NewRequest(ctx, http.MethodDelete, path, &forwardingRulesRequest{Rules: rules}) - if err != nil { - return nil, err - } - - return l.client.Do(ctx, req, nil) -} - -// PurgeCache purges the CDN cache of a global load balancer by its identifier. -func (l *LoadBalancersServiceOp) PurgeCache(ctx context.Context, ldID string) (*Response, error) { - path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, ldID, cachePath) - - req, err := l.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - return l.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/digitalocean/godo/meta.go b/vendor/github.com/digitalocean/godo/meta.go deleted file mode 100644 index d0b7017..0000000 --- a/vendor/github.com/digitalocean/godo/meta.go +++ /dev/null @@ -1,6 +0,0 @@ -package godo - -// Meta describes generic information about a response. 
-type Meta struct { - Total int `json:"total"` -} diff --git a/vendor/github.com/digitalocean/godo/metrics/metrics.go b/vendor/github.com/digitalocean/godo/metrics/metrics.go deleted file mode 100644 index cc2ac6a..0000000 --- a/vendor/github.com/digitalocean/godo/metrics/metrics.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metrics is a minimal copy of github.com/prometheus/common/model -// providing types to work with the Prometheus-style results in a DigitalOcean -// Monitoring metrics response. -package metrics - -import ( - "fmt" - "sort" - "strings" -) - -const ( - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// A LabelValue is an associated value for a MetricLabelName. 
-type LabelValue string - -// A LabelName is a key for a Metric. -type LabelName string - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} diff --git a/vendor/github.com/digitalocean/godo/metrics/time.go b/vendor/github.com/digitalocean/godo/metrics/time.go deleted file mode 100644 index 2d50795..0000000 --- a/vendor/github.com/digitalocean/godo/metrics/time.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. 
- minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. - second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. -func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. 
-func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. 
- if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} diff --git a/vendor/github.com/digitalocean/godo/metrics/values.go b/vendor/github.com/digitalocean/godo/metrics/values.go deleted file mode 100644 index ae39ef2..0000000 --- a/vendor/github.com/digitalocean/godo/metrics/values.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "encoding/json" - "fmt" - "math" - "strconv" - "strings" -) - -// A SampleValue is a representation of a value for a given sample at a given time. -type SampleValue float64 - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. 
If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. 
-type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} diff --git a/vendor/github.com/digitalocean/godo/monitoring.go b/vendor/github.com/digitalocean/godo/monitoring.go deleted file mode 100644 index 937bb8d..0000000 --- a/vendor/github.com/digitalocean/godo/monitoring.go +++ /dev/null @@ -1,374 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/digitalocean/godo/metrics" -) - -const ( - monitoringBasePath = "v2/monitoring" - alertPolicyBasePath = monitoringBasePath + "/alerts" - dropletMetricsBasePath = monitoringBasePath + "/metrics/droplet" - - DropletCPUUtilizationPercent = "v1/insights/droplet/cpu" - DropletMemoryUtilizationPercent = "v1/insights/droplet/memory_utilization_percent" - DropletDiskUtilizationPercent = "v1/insights/droplet/disk_utilization_percent" - DropletPublicOutboundBandwidthRate = "v1/insights/droplet/public_outbound_bandwidth" - DropletPublicInboundBandwidthRate = "v1/insights/droplet/public_inbound_bandwidth" - DropletPrivateOutboundBandwidthRate = "v1/insights/droplet/private_outbound_bandwidth" - DropletPrivateInboundBandwidthRate = "v1/insights/droplet/private_inbound_bandwidth" - DropletDiskReadRate = "v1/insights/droplet/disk_read" - DropletDiskWriteRate = "v1/insights/droplet/disk_write" - DropletOneMinuteLoadAverage = "v1/insights/droplet/load_1" - DropletFiveMinuteLoadAverage = "v1/insights/droplet/load_5" - DropletFifteenMinuteLoadAverage = "v1/insights/droplet/load_15" - - LoadBalancerCPUUtilizationPercent = "v1/insights/lbaas/avg_cpu_utilization_percent" - LoadBalancerConnectionUtilizationPercent = "v1/insights/lbaas/connection_utilization_percent" - LoadBalancerDropletHealth = "v1/insights/lbaas/droplet_health" - 
LoadBalancerTLSUtilizationPercent = "v1/insights/lbaas/tls_connections_per_second_utilization_percent" - LoadBalancerIncreaseInHTTPErrorRatePercentage5xx = "v1/insights/lbaas/increase_in_http_error_rate_percentage_5xx" - LoadBalancerIncreaseInHTTPErrorRatePercentage4xx = "v1/insights/lbaas/increase_in_http_error_rate_percentage_4xx" - LoadBalancerIncreaseInHTTPErrorRateCount5xx = "v1/insights/lbaas/increase_in_http_error_rate_count_5xx" - LoadBalancerIncreaseInHTTPErrorRateCount4xx = "v1/insights/lbaas/increase_in_http_error_rate_count_4xx" - LoadBalancerHighHttpResponseTime = "v1/insights/lbaas/high_http_request_response_time" - LoadBalancerHighHttpResponseTime50P = "v1/insights/lbaas/high_http_request_response_time_50p" - LoadBalancerHighHttpResponseTime95P = "v1/insights/lbaas/high_http_request_response_time_95p" - LoadBalancerHighHttpResponseTime99P = "v1/insights/lbaas/high_http_request_response_time_99p" - - DbaasFifteenMinuteLoadAverage = "v1/dbaas/alerts/load_15_alerts" - DbaasMemoryUtilizationPercent = "v1/dbaas/alerts/memory_utilization_alerts" - DbaasDiskUtilizationPercent = "v1/dbaas/alerts/disk_utilization_alerts" - DbaasCPUUtilizationPercent = "v1/dbaas/alerts/cpu_alerts" -) - -// MonitoringService is an interface for interfacing with the -// monitoring endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Monitoring -type MonitoringService interface { - ListAlertPolicies(context.Context, *ListOptions) ([]AlertPolicy, *Response, error) - GetAlertPolicy(context.Context, string) (*AlertPolicy, *Response, error) - CreateAlertPolicy(context.Context, *AlertPolicyCreateRequest) (*AlertPolicy, *Response, error) - UpdateAlertPolicy(context.Context, string, *AlertPolicyUpdateRequest) (*AlertPolicy, *Response, error) - DeleteAlertPolicy(context.Context, string) (*Response, error) - - GetDropletBandwidth(context.Context, *DropletBandwidthMetricsRequest) (*MetricsResponse, *Response, error) - 
GetDropletAvailableMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletCPU(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletFilesystemFree(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletFilesystemSize(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletLoad1(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletLoad5(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletLoad15(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletCachedMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletFreeMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) - GetDropletTotalMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error) -} - -// MonitoringServiceOp handles communication with monitoring related methods of the -// DigitalOcean API. 
-type MonitoringServiceOp struct { - client *Client -} - -var _ MonitoringService = &MonitoringServiceOp{} - -// AlertPolicy represents a DigitalOcean alert policy -type AlertPolicy struct { - UUID string `json:"uuid"` - Type string `json:"type"` - Description string `json:"description"` - Compare AlertPolicyComp `json:"compare"` - Value float32 `json:"value"` - Window string `json:"window"` - Entities []string `json:"entities"` - Tags []string `json:"tags"` - Alerts Alerts `json:"alerts"` - Enabled bool `json:"enabled"` -} - -// Alerts represents the alerts section of an alert policy -type Alerts struct { - Slack []SlackDetails `json:"slack"` - Email []string `json:"email"` -} - -// SlackDetails represents the details required to send a slack alert -type SlackDetails struct { - URL string `json:"url"` - Channel string `json:"channel"` -} - -// AlertPolicyComp represents an alert policy comparison operation -type AlertPolicyComp string - -const ( - // GreaterThan is the comparison > - GreaterThan AlertPolicyComp = "GreaterThan" - // LessThan is the comparison < - LessThan AlertPolicyComp = "LessThan" -) - -// AlertPolicyCreateRequest holds the info for creating a new alert policy -type AlertPolicyCreateRequest struct { - Type string `json:"type"` - Description string `json:"description"` - Compare AlertPolicyComp `json:"compare"` - Value float32 `json:"value"` - Window string `json:"window"` - Entities []string `json:"entities"` - Tags []string `json:"tags"` - Alerts Alerts `json:"alerts"` - Enabled *bool `json:"enabled"` -} - -// AlertPolicyUpdateRequest holds the info for updating an existing alert policy -type AlertPolicyUpdateRequest struct { - Type string `json:"type"` - Description string `json:"description"` - Compare AlertPolicyComp `json:"compare"` - Value float32 `json:"value"` - Window string `json:"window"` - Entities []string `json:"entities"` - Tags []string `json:"tags"` - Alerts Alerts `json:"alerts"` - Enabled *bool `json:"enabled"` -} - -type 
alertPoliciesRoot struct { - AlertPolicies []AlertPolicy `json:"policies"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type alertPolicyRoot struct { - AlertPolicy *AlertPolicy `json:"policy,omitempty"` -} - -// DropletMetricsRequest holds the information needed to retrieve Droplet various metrics. -type DropletMetricsRequest struct { - HostID string - Start time.Time - End time.Time -} - -// DropletBandwidthMetricsRequest holds the information needed to retrieve Droplet bandwidth metrics. -type DropletBandwidthMetricsRequest struct { - DropletMetricsRequest - Interface string - Direction string -} - -// MetricsResponse holds a Metrics query response. -type MetricsResponse struct { - Status string `json:"status"` - Data MetricsData `json:"data"` -} - -// MetricsData holds the data portion of a Metrics response. -type MetricsData struct { - ResultType string `json:"resultType"` - Result []metrics.SampleStream `json:"result"` -} - -// ListAlertPolicies all alert policies -func (s *MonitoringServiceOp) ListAlertPolicies(ctx context.Context, opt *ListOptions) ([]AlertPolicy, *Response, error) { - path := alertPolicyBasePath - path, err := addOptions(path, opt) - - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(alertPoliciesRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - return root.AlertPolicies, resp, err -} - -// GetAlertPolicy gets a single alert policy -func (s *MonitoringServiceOp) GetAlertPolicy(ctx context.Context, uuid string) (*AlertPolicy, *Response, error) { - path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(alertPolicyRoot) - resp, 
err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.AlertPolicy, resp, err -} - -// CreateAlertPolicy creates a new alert policy -func (s *MonitoringServiceOp) CreateAlertPolicy(ctx context.Context, createRequest *AlertPolicyCreateRequest) (*AlertPolicy, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - req, err := s.client.NewRequest(ctx, http.MethodPost, alertPolicyBasePath, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(alertPolicyRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.AlertPolicy, resp, err -} - -// UpdateAlertPolicy updates an existing alert policy -func (s *MonitoringServiceOp) UpdateAlertPolicy(ctx context.Context, uuid string, updateRequest *AlertPolicyUpdateRequest) (*AlertPolicy, *Response, error) { - if uuid == "" { - return nil, nil, NewArgError("uuid", "cannot be empty") - } - if updateRequest == nil { - return nil, nil, NewArgError("updateRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid) - req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest) - if err != nil { - return nil, nil, err - } - - root := new(alertPolicyRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.AlertPolicy, resp, err -} - -// DeleteAlertPolicy deletes an existing alert policy -func (s *MonitoringServiceOp) DeleteAlertPolicy(ctx context.Context, uuid string) (*Response, error) { - if uuid == "" { - return nil, NewArgError("uuid", "cannot be empty") - } - - path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid) - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// GetDropletBandwidth retrieves Droplet bandwidth metrics. 
-func (s *MonitoringServiceOp) GetDropletBandwidth(ctx context.Context, args *DropletBandwidthMetricsRequest) (*MetricsResponse, *Response, error) { - path := dropletMetricsBasePath + "/bandwidth" - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - q := req.URL.Query() - q.Add("host_id", args.HostID) - q.Add("interface", args.Interface) - q.Add("direction", args.Direction) - q.Add("start", fmt.Sprintf("%d", args.Start.Unix())) - q.Add("end", fmt.Sprintf("%d", args.End.Unix())) - req.URL.RawQuery = q.Encode() - - root := new(MetricsResponse) - resp, err := s.client.Do(ctx, req, root) - - return root, resp, err -} - -// GetDropletCPU retrieves Droplet CPU metrics. -func (s *MonitoringServiceOp) GetDropletCPU(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/cpu", args) -} - -// GetDropletFilesystemFree retrieves Droplet filesystem free metrics. -func (s *MonitoringServiceOp) GetDropletFilesystemFree(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/filesystem_free", args) -} - -// GetDropletFilesystemSize retrieves Droplet filesystem size metrics. -func (s *MonitoringServiceOp) GetDropletFilesystemSize(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/filesystem_size", args) -} - -// GetDropletLoad1 retrieves Droplet load 1 metrics. -func (s *MonitoringServiceOp) GetDropletLoad1(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/load_1", args) -} - -// GetDropletLoad5 retrieves Droplet load 5 metrics. 
-func (s *MonitoringServiceOp) GetDropletLoad5(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/load_5", args) -} - -// GetDropletLoad15 retrieves Droplet load 15 metrics. -func (s *MonitoringServiceOp) GetDropletLoad15(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/load_15", args) -} - -// GetDropletCachedMemory retrieves Droplet cached memory metrics. -func (s *MonitoringServiceOp) GetDropletCachedMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/memory_cached", args) -} - -// GetDropletFreeMemory retrieves Droplet free memory metrics. -func (s *MonitoringServiceOp) GetDropletFreeMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/memory_free", args) -} - -// GetDropletTotalMemory retrieves Droplet total memory metrics. -func (s *MonitoringServiceOp) GetDropletTotalMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/memory_total", args) -} - -// GetDropletAvailableMemory retrieves Droplet available memory metrics. 
-func (s *MonitoringServiceOp) GetDropletAvailableMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - return s.getDropletMetrics(ctx, "/memory_available", args) -} - -func (s *MonitoringServiceOp) getDropletMetrics(ctx context.Context, path string, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) { - fullPath := dropletMetricsBasePath + path - req, err := s.client.NewRequest(ctx, http.MethodGet, fullPath, nil) - if err != nil { - return nil, nil, err - } - - q := req.URL.Query() - q.Add("host_id", args.HostID) - q.Add("start", fmt.Sprintf("%d", args.Start.Unix())) - q.Add("end", fmt.Sprintf("%d", args.End.Unix())) - req.URL.RawQuery = q.Encode() - - root := new(MetricsResponse) - resp, err := s.client.Do(ctx, req, root) - - return root, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/projects.go b/vendor/github.com/digitalocean/godo/projects.go deleted file mode 100644 index b59134b..0000000 --- a/vendor/github.com/digitalocean/godo/projects.go +++ /dev/null @@ -1,309 +0,0 @@ -package godo - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "path" -) - -const ( - // DefaultProject is the ID you should use if you are working with your - // default project. - DefaultProject = "default" - - projectsBasePath = "/v2/projects" -) - -// ProjectsService is an interface for creating and managing Projects with the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects -type ProjectsService interface { - List(context.Context, *ListOptions) ([]Project, *Response, error) - GetDefault(context.Context) (*Project, *Response, error) - Get(context.Context, string) (*Project, *Response, error) - Create(context.Context, *CreateProjectRequest) (*Project, *Response, error) - Update(context.Context, string, *UpdateProjectRequest) (*Project, *Response, error) - Delete(context.Context, string) (*Response, error) - - ListResources(context.Context, string, *ListOptions) ([]ProjectResource, *Response, error) - AssignResources(context.Context, string, ...interface{}) ([]ProjectResource, *Response, error) -} - -// ProjectsServiceOp handles communication with Projects methods of the DigitalOcean API. -type ProjectsServiceOp struct { - client *Client -} - -// Project represents a DigitalOcean Project configuration. -type Project struct { - ID string `json:"id"` - OwnerUUID string `json:"owner_uuid"` - OwnerID uint64 `json:"owner_id"` - Name string `json:"name"` - Description string `json:"description"` - Purpose string `json:"purpose"` - Environment string `json:"environment"` - IsDefault bool `json:"is_default"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` -} - -// String creates a human-readable description of a Project. -func (p Project) String() string { - return Stringify(p) -} - -// CreateProjectRequest represents the request to create a new project. -type CreateProjectRequest struct { - Name string `json:"name"` - Description string `json:"description"` - Purpose string `json:"purpose"` - Environment string `json:"environment"` -} - -// UpdateProjectRequest represents the request to update project information. -// This type expects certain attribute types, but is built this way to allow -// nil values as well. See `updateProjectRequest` for the "real" types. 
-type UpdateProjectRequest struct { - Name interface{} - Description interface{} - Purpose interface{} - Environment interface{} - IsDefault interface{} -} - -type updateProjectRequest struct { - Name *string `json:"name"` - Description *string `json:"description"` - Purpose *string `json:"purpose"` - Environment *string `json:"environment"` - IsDefault *bool `json:"is_default"` -} - -// MarshalJSON takes an UpdateRequest and converts it to the "typed" request -// which is sent to the projects API. This is a PATCH request, which allows -// partial attributes, so `null` values are OK. -func (upr *UpdateProjectRequest) MarshalJSON() ([]byte, error) { - d := &updateProjectRequest{} - if str, ok := upr.Name.(string); ok { - d.Name = &str - } - if str, ok := upr.Description.(string); ok { - d.Description = &str - } - if str, ok := upr.Purpose.(string); ok { - d.Purpose = &str - } - if str, ok := upr.Environment.(string); ok { - d.Environment = &str - } - if val, ok := upr.IsDefault.(bool); ok { - d.IsDefault = &val - } - - return json.Marshal(d) -} - -type assignResourcesRequest struct { - Resources []string `json:"resources"` -} - -// ProjectResource is the projects API's representation of a resource. -type ProjectResource struct { - URN string `json:"urn"` - AssignedAt string `json:"assigned_at"` - Links *ProjectResourceLinks `json:"links"` - Status string `json:"status,omitempty"` -} - -// ProjectResourceLinks specify the link for more information about the resource. -type ProjectResourceLinks struct { - Self string `json:"self"` -} - -type projectsRoot struct { - Projects []Project `json:"projects"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type projectRoot struct { - Project *Project `json:"project"` -} - -type projectResourcesRoot struct { - Resources []ProjectResource `json:"resources"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -var _ ProjectsService = &ProjectsServiceOp{} - -// List Projects. 
-func (p *ProjectsServiceOp) List(ctx context.Context, opts *ListOptions) ([]Project, *Response, error) { - path, err := addOptions(projectsBasePath, opts) - if err != nil { - return nil, nil, err - } - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(projectsRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Projects, resp, err -} - -// GetDefault project. -func (p *ProjectsServiceOp) GetDefault(ctx context.Context) (*Project, *Response, error) { - return p.getHelper(ctx, "default") -} - -// Get retrieves a single project by its ID. -func (p *ProjectsServiceOp) Get(ctx context.Context, projectID string) (*Project, *Response, error) { - return p.getHelper(ctx, projectID) -} - -// Create a new project. -func (p *ProjectsServiceOp) Create(ctx context.Context, cr *CreateProjectRequest) (*Project, *Response, error) { - req, err := p.client.NewRequest(ctx, http.MethodPost, projectsBasePath, cr) - if err != nil { - return nil, nil, err - } - - root := new(projectRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Project, resp, err -} - -// Update an existing project. -func (p *ProjectsServiceOp) Update(ctx context.Context, projectID string, ur *UpdateProjectRequest) (*Project, *Response, error) { - path := path.Join(projectsBasePath, projectID) - req, err := p.client.NewRequest(ctx, http.MethodPatch, path, ur) - if err != nil { - return nil, nil, err - } - - root := new(projectRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Project, resp, err -} - -// Delete an existing project. You cannot have any resources in a project -// before deleting it. See the API documentation for more details. 
-func (p *ProjectsServiceOp) Delete(ctx context.Context, projectID string) (*Response, error) { - path := path.Join(projectsBasePath, projectID) - req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - return p.client.Do(ctx, req, nil) -} - -// ListResources lists all resources in a project. -func (p *ProjectsServiceOp) ListResources(ctx context.Context, projectID string, opts *ListOptions) ([]ProjectResource, *Response, error) { - basePath := path.Join(projectsBasePath, projectID, "resources") - path, err := addOptions(basePath, opts) - if err != nil { - return nil, nil, err - } - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(projectResourcesRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Resources, resp, err -} - -// AssignResources assigns one or more resources to a project. AssignResources -// accepts resources in two possible formats: -// 1. The resource type, like `&Droplet{ID: 1}` or `&FloatingIP{IP: "1.2.3.4"}` -// 2. A valid DO URN as a string, like "do:droplet:1234" -// -// There is no unassign. To move a resource to another project, just assign -// it to that other project. 
-func (p *ProjectsServiceOp) AssignResources(ctx context.Context, projectID string, resources ...interface{}) ([]ProjectResource, *Response, error) { - path := path.Join(projectsBasePath, projectID, "resources") - - ar := &assignResourcesRequest{ - Resources: make([]string, len(resources)), - } - - for i, resource := range resources { - switch resource := resource.(type) { - case ResourceWithURN: - ar.Resources[i] = resource.URN() - case string: - ar.Resources[i] = resource - default: - return nil, nil, fmt.Errorf("%T must either be a string or have a valid URN method", resource) - } - } - req, err := p.client.NewRequest(ctx, http.MethodPost, path, ar) - if err != nil { - return nil, nil, err - } - - root := new(projectResourcesRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.Resources, resp, err -} - -func (p *ProjectsServiceOp) getHelper(ctx context.Context, projectID string) (*Project, *Response, error) { - path := path.Join(projectsBasePath, projectID) - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(projectRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Project, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/regions.go b/vendor/github.com/digitalocean/godo/regions.go deleted file mode 100644 index ea82f2f..0000000 --- a/vendor/github.com/digitalocean/godo/regions.go +++ /dev/null @@ -1,68 +0,0 @@ -package godo - -import ( - "context" - "net/http" -) - -// RegionsService is an interface for interfacing with the regions -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Regions -type RegionsService interface { - List(context.Context, *ListOptions) ([]Region, *Response, error) -} - -// RegionsServiceOp handles communication with the 
region related methods of the -// DigitalOcean API. -type RegionsServiceOp struct { - client *Client -} - -var _ RegionsService = &RegionsServiceOp{} - -// Region represents a DigitalOcean Region -type Region struct { - Slug string `json:"slug,omitempty"` - Name string `json:"name,omitempty"` - Sizes []string `json:"sizes,omitempty"` - Available bool `json:"available,omitempty"` - Features []string `json:"features,omitempty"` -} - -type regionsRoot struct { - Regions []Region - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -func (r Region) String() string { - return Stringify(r) -} - -// List all regions -func (s *RegionsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Region, *Response, error) { - path := "v2/regions" - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(regionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Regions, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/registry.go b/vendor/github.com/digitalocean/godo/registry.go deleted file mode 100644 index b0c2432..0000000 --- a/vendor/github.com/digitalocean/godo/registry.go +++ /dev/null @@ -1,612 +0,0 @@ -package godo - -import ( - "bytes" - "context" - "fmt" - "net/http" - "net/url" - "strconv" - "time" -) - -const ( - registryPath = "/v2/registry" - // RegistryServer is the hostname of the DigitalOcean registry service - RegistryServer = "registry.digitalocean.com" -) - -// RegistryService is an interface for interfacing with the Registry endpoints -// of the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Container-Registry -type RegistryService interface { - Create(context.Context, *RegistryCreateRequest) (*Registry, *Response, error) - Get(context.Context) (*Registry, *Response, error) - Delete(context.Context) (*Response, error) - DockerCredentials(context.Context, *RegistryDockerCredentialsRequest) (*DockerCredentials, *Response, error) - ListRepositories(context.Context, string, *ListOptions) ([]*Repository, *Response, error) - ListRepositoriesV2(context.Context, string, *TokenListOptions) ([]*RepositoryV2, *Response, error) - ListRepositoryTags(context.Context, string, string, *ListOptions) ([]*RepositoryTag, *Response, error) - DeleteTag(context.Context, string, string, string) (*Response, error) - ListRepositoryManifests(context.Context, string, string, *ListOptions) ([]*RepositoryManifest, *Response, error) - DeleteManifest(context.Context, string, string, string) (*Response, error) - StartGarbageCollection(context.Context, string, ...*StartGarbageCollectionRequest) (*GarbageCollection, *Response, error) - GetGarbageCollection(context.Context, string) (*GarbageCollection, *Response, error) - ListGarbageCollections(context.Context, string, *ListOptions) ([]*GarbageCollection, *Response, error) - UpdateGarbageCollection(context.Context, string, string, *UpdateGarbageCollectionRequest) (*GarbageCollection, *Response, error) - GetOptions(context.Context) (*RegistryOptions, *Response, error) - GetSubscription(context.Context) (*RegistrySubscription, *Response, error) - UpdateSubscription(context.Context, *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error) - ValidateName(context.Context, *RegistryValidateNameRequest) (*Response, error) -} - -var _ RegistryService = &RegistryServiceOp{} - -// RegistryServiceOp handles communication with Registry methods of the DigitalOcean API. 
-type RegistryServiceOp struct { - client *Client -} - -// RegistryCreateRequest represents a request to create a registry. -type RegistryCreateRequest struct { - Name string `json:"name,omitempty"` - SubscriptionTierSlug string `json:"subscription_tier_slug,omitempty"` - Region string `json:"region,omitempty"` -} - -// RegistryDockerCredentialsRequest represents a request to retrieve docker -// credentials for a registry. -type RegistryDockerCredentialsRequest struct { - ReadWrite bool `json:"read_write"` - ExpirySeconds *int `json:"expiry_seconds,omitempty"` -} - -// Registry represents a registry. -type Registry struct { - Name string `json:"name,omitempty"` - StorageUsageBytes uint64 `json:"storage_usage_bytes,omitempty"` - StorageUsageBytesUpdatedAt time.Time `json:"storage_usage_bytes_updated_at,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - Region string `json:"region,omitempty"` -} - -// Repository represents a repository -type Repository struct { - RegistryName string `json:"registry_name,omitempty"` - Name string `json:"name,omitempty"` - LatestTag *RepositoryTag `json:"latest_tag,omitempty"` - TagCount uint64 `json:"tag_count,omitempty"` -} - -// RepositoryV2 represents a repository in the V2 format -type RepositoryV2 struct { - RegistryName string `json:"registry_name,omitempty"` - Name string `json:"name,omitempty"` - TagCount uint64 `json:"tag_count,omitempty"` - ManifestCount uint64 `json:"manifest_count,omitempty"` - LatestManifest *RepositoryManifest `json:"latest_manifest,omitempty"` -} - -// RepositoryTag represents a repository tag -type RepositoryTag struct { - RegistryName string `json:"registry_name,omitempty"` - Repository string `json:"repository,omitempty"` - Tag string `json:"tag,omitempty"` - ManifestDigest string `json:"manifest_digest,omitempty"` - CompressedSizeBytes uint64 `json:"compressed_size_bytes,omitempty"` - SizeBytes uint64 `json:"size_bytes,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` -} 
- -// RepositoryManifest represents a repository manifest -type RepositoryManifest struct { - RegistryName string `json:"registry_name,omitempty"` - Repository string `json:"repository,omitempty"` - Digest string `json:"digest,omitempty"` - CompressedSizeBytes uint64 `json:"compressed_size_bytes,omitempty"` - SizeBytes uint64 `json:"size_bytes,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - Tags []string `json:"tags,omitempty"` - Blobs []*Blob `json:"blobs,omitempty"` -} - -// Blob represents a registry blob -type Blob struct { - Digest string `json:"digest,omitempty"` - CompressedSizeBytes uint64 `json:"compressed_size_bytes,omitempty"` -} - -type registryRoot struct { - Registry *Registry `json:"registry,omitempty"` -} - -type repositoriesRoot struct { - Repositories []*Repository `json:"repositories,omitempty"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -type repositoriesV2Root struct { - Repositories []*RepositoryV2 `json:"repositories,omitempty"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -type repositoryTagsRoot struct { - Tags []*RepositoryTag `json:"tags,omitempty"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -type repositoryManifestsRoot struct { - Manifests []*RepositoryManifest `json:"manifests,omitempty"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -// GarbageCollection represents a garbage collection. 
-type GarbageCollection struct { - UUID string `json:"uuid"` - RegistryName string `json:"registry_name"` - Status string `json:"status"` - Type GarbageCollectionType `json:"type"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - BlobsDeleted uint64 `json:"blobs_deleted"` - FreedBytes uint64 `json:"freed_bytes"` -} - -type garbageCollectionRoot struct { - GarbageCollection *GarbageCollection `json:"garbage_collection,omitempty"` -} - -type garbageCollectionsRoot struct { - GarbageCollections []*GarbageCollection `json:"garbage_collections,omitempty"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta"` -} - -type GarbageCollectionType string - -const ( - // GCTypeUntaggedManifestsOnly indicates that a garbage collection should - // only delete untagged manifests. - GCTypeUntaggedManifestsOnly = GarbageCollectionType("untagged manifests only") - // GCTypeUnreferencedBlobsOnly indicates that a garbage collection should - // only delete unreferenced blobs. - GCTypeUnreferencedBlobsOnly = GarbageCollectionType("unreferenced blobs only") - // GCTypeUntaggedManifestsAndUnreferencedBlobs indicates that a garbage - // collection should delete both untagged manifests and unreferenced blobs. - GCTypeUntaggedManifestsAndUnreferencedBlobs = GarbageCollectionType("untagged manifests and unreferenced blobs") -) - -// StartGarbageCollectionRequest represents options to a garbage collection -// start request. -type StartGarbageCollectionRequest struct { - Type GarbageCollectionType `json:"type"` -} - -// UpdateGarbageCollectionRequest represents a request to update a garbage -// collection. -type UpdateGarbageCollectionRequest struct { - Cancel bool `json:"cancel"` -} - -// RegistryOptions are options for users when creating or updating a registry. 
-type RegistryOptions struct { - SubscriptionTiers []*RegistrySubscriptionTier `json:"subscription_tiers,omitempty"` - AvailableRegions []string `json:"available_regions"` -} - -type registryOptionsRoot struct { - Options *RegistryOptions `json:"options"` -} - -// RegistrySubscriptionTier is a subscription tier for container registry. -type RegistrySubscriptionTier struct { - Name string `json:"name"` - Slug string `json:"slug"` - IncludedRepositories uint64 `json:"included_repositories"` - IncludedStorageBytes uint64 `json:"included_storage_bytes"` - AllowStorageOverage bool `json:"allow_storage_overage"` - IncludedBandwidthBytes uint64 `json:"included_bandwidth_bytes"` - MonthlyPriceInCents uint64 `json:"monthly_price_in_cents"` - Eligible bool `json:"eligible,omitempty"` - // EligibilityReasons is included when Eligible is false, and indicates the - // reasons why this tier is not available to the user. - EligibilityReasons []string `json:"eligibility_reasons,omitempty"` -} - -// RegistrySubscription is a user's subscription. -type RegistrySubscription struct { - Tier *RegistrySubscriptionTier `json:"tier"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -type registrySubscriptionRoot struct { - Subscription *RegistrySubscription `json:"subscription"` -} - -// RegistrySubscriptionUpdateRequest represents a request to update the -// subscription plan for a registry. -type RegistrySubscriptionUpdateRequest struct { - TierSlug string `json:"tier_slug"` -} - -// RegistryValidateNameRequest represents a request to validate that a -// container registry name is available for use. -type RegistryValidateNameRequest struct { - Name string `json:"name"` -} - -// Get retrieves the details of a Registry. 
-func (svc *RegistryServiceOp) Get(ctx context.Context) (*Registry, *Response, error) { - req, err := svc.client.NewRequest(ctx, http.MethodGet, registryPath, nil) - if err != nil { - return nil, nil, err - } - root := new(registryRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Registry, resp, nil -} - -// Create creates a registry. -func (svc *RegistryServiceOp) Create(ctx context.Context, create *RegistryCreateRequest) (*Registry, *Response, error) { - req, err := svc.client.NewRequest(ctx, http.MethodPost, registryPath, create) - if err != nil { - return nil, nil, err - } - root := new(registryRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Registry, resp, nil -} - -// Delete deletes a registry. There is no way to recover a registry once it has -// been destroyed. -func (svc *RegistryServiceOp) Delete(ctx context.Context) (*Response, error) { - req, err := svc.client.NewRequest(ctx, http.MethodDelete, registryPath, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// DockerCredentials is the content of a Docker config file -// that is used by the docker CLI -// See: https://docs.docker.com/engine/reference/commandline/cli/#configjson-properties -type DockerCredentials struct { - DockerConfigJSON []byte -} - -// DockerCredentials retrieves a Docker config file containing the registry's credentials. 
-func (svc *RegistryServiceOp) DockerCredentials(ctx context.Context, request *RegistryDockerCredentialsRequest) (*DockerCredentials, *Response, error) { - path := fmt.Sprintf("%s/%s", registryPath, "docker-credentials") - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - q := req.URL.Query() - q.Add("read_write", strconv.FormatBool(request.ReadWrite)) - if request.ExpirySeconds != nil { - q.Add("expiry_seconds", strconv.Itoa(*request.ExpirySeconds)) - } - req.URL.RawQuery = q.Encode() - - var buf bytes.Buffer - resp, err := svc.client.Do(ctx, req, &buf) - if err != nil { - return nil, resp, err - } - - dc := &DockerCredentials{ - DockerConfigJSON: buf.Bytes(), - } - return dc, resp, nil -} - -// ListRepositories returns a list of the Repositories visible with the registry's credentials. -func (svc *RegistryServiceOp) ListRepositories(ctx context.Context, registry string, opts *ListOptions) ([]*Repository, *Response, error) { - path := fmt.Sprintf("%s/%s/repositories", registryPath, registry) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(repositoriesRoot) - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Repositories, resp, nil -} - -// ListRepositoriesV2 returns a list of the Repositories in a registry. 
-func (svc *RegistryServiceOp) ListRepositoriesV2(ctx context.Context, registry string, opts *TokenListOptions) ([]*RepositoryV2, *Response, error) { - path := fmt.Sprintf("%s/%s/repositoriesV2", registryPath, registry) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(repositoriesV2Root) - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - resp.Links = root.Links - resp.Meta = root.Meta - - return root.Repositories, resp, nil -} - -// ListRepositoryTags returns a list of the RepositoryTags available within the given repository. -func (svc *RegistryServiceOp) ListRepositoryTags(ctx context.Context, registry, repository string, opts *ListOptions) ([]*RepositoryTag, *Response, error) { - path := fmt.Sprintf("%s/%s/repositories/%s/tags", registryPath, registry, url.PathEscape(repository)) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(repositoryTagsRoot) - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Tags, resp, nil -} - -// DeleteTag deletes a tag within a given repository. 
-func (svc *RegistryServiceOp) DeleteTag(ctx context.Context, registry, repository, tag string) (*Response, error) { - path := fmt.Sprintf("%s/%s/repositories/%s/tags/%s", registryPath, registry, url.PathEscape(repository), tag) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// ListRepositoryManifests returns a list of the RepositoryManifests available within the given repository. -func (svc *RegistryServiceOp) ListRepositoryManifests(ctx context.Context, registry, repository string, opts *ListOptions) ([]*RepositoryManifest, *Response, error) { - path := fmt.Sprintf("%s/%s/repositories/%s/digests", registryPath, registry, url.PathEscape(repository)) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(repositoryManifestsRoot) - - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - resp.Links = root.Links - resp.Meta = root.Meta - - return root.Manifests, resp, nil -} - -// DeleteManifest deletes a manifest by its digest within a given repository. -func (svc *RegistryServiceOp) DeleteManifest(ctx context.Context, registry, repository, digest string) (*Response, error) { - path := fmt.Sprintf("%s/%s/repositories/%s/digests/%s", registryPath, registry, url.PathEscape(repository), digest) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -// StartGarbageCollection requests a garbage collection for the specified -// registry. 
-func (svc *RegistryServiceOp) StartGarbageCollection(ctx context.Context, registry string, request ...*StartGarbageCollectionRequest) (*GarbageCollection, *Response, error) { - path := fmt.Sprintf("%s/%s/garbage-collection", registryPath, registry) - var requestParams interface{} - if len(request) < 1 { - // default to only garbage collecting unreferenced blobs for backwards - // compatibility - requestParams = &StartGarbageCollectionRequest{ - Type: GCTypeUnreferencedBlobsOnly, - } - } else { - requestParams = request[0] - } - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, requestParams) - if err != nil { - return nil, nil, err - } - - root := new(garbageCollectionRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.GarbageCollection, resp, err -} - -// GetGarbageCollection retrieves the currently-active garbage collection for -// the specified registry; if there are no active garbage collections, then -// return a 404/NotFound error. There can only be one active garbage -// collection on a registry. -func (svc *RegistryServiceOp) GetGarbageCollection(ctx context.Context, registry string) (*GarbageCollection, *Response, error) { - path := fmt.Sprintf("%s/%s/garbage-collection", registryPath, registry) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(garbageCollectionRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.GarbageCollection, resp, nil -} - -// ListGarbageCollections retrieves all garbage collections (active and -// inactive) for the specified registry. 
-func (svc *RegistryServiceOp) ListGarbageCollections(ctx context.Context, registry string, opts *ListOptions) ([]*GarbageCollection, *Response, error) { - path := fmt.Sprintf("%s/%s/garbage-collections", registryPath, registry) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(garbageCollectionsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if root.Links != nil { - resp.Links = root.Links - } - if root.Meta != nil { - resp.Meta = root.Meta - } - - return root.GarbageCollections, resp, nil -} - -// UpdateGarbageCollection updates the specified garbage collection for the -// specified registry. While only the currently-active garbage collection can -// be updated we still require the exact garbage collection to be specified to -// avoid race conditions that might may arise from issuing an update to the -// implicit "currently-active" garbage collection. Returns the updated garbage -// collection. -func (svc *RegistryServiceOp) UpdateGarbageCollection(ctx context.Context, registry, gcUUID string, request *UpdateGarbageCollectionRequest) (*GarbageCollection, *Response, error) { - path := fmt.Sprintf("%s/%s/garbage-collection/%s", registryPath, registry, gcUUID) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, request) - if err != nil { - return nil, nil, err - } - - root := new(garbageCollectionRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.GarbageCollection, resp, nil -} - -// GetOptions returns options the user can use when creating or updating a -// registry. 
-func (svc *RegistryServiceOp) GetOptions(ctx context.Context) (*RegistryOptions, *Response, error) { - path := fmt.Sprintf("%s/options", registryPath) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(registryOptionsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Options, resp, nil -} - -// GetSubscription retrieves the user's subscription. -func (svc *RegistryServiceOp) GetSubscription(ctx context.Context) (*RegistrySubscription, *Response, error) { - path := fmt.Sprintf("%s/subscription", registryPath) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - root := new(registrySubscriptionRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Subscription, resp, nil -} - -// UpdateSubscription updates the user's registry subscription. -func (svc *RegistryServiceOp) UpdateSubscription(ctx context.Context, request *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error) { - path := fmt.Sprintf("%s/subscription", registryPath) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, nil, err - } - root := new(registrySubscriptionRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Subscription, resp, nil -} - -// ValidateName validates that a container registry name is available for use. 
-func (svc *RegistryServiceOp) ValidateName(ctx context.Context, request *RegistryValidateNameRequest) (*Response, error) { - path := fmt.Sprintf("%s/validate-name", registryPath) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} diff --git a/vendor/github.com/digitalocean/godo/reserved_ips.go b/vendor/github.com/digitalocean/godo/reserved_ips.go deleted file mode 100644 index 5370c14..0000000 --- a/vendor/github.com/digitalocean/godo/reserved_ips.go +++ /dev/null @@ -1,148 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const resourceType = "ReservedIP" -const reservedIPsBasePath = "v2/reserved_ips" - -// ReservedIPsService is an interface for interfacing with the reserved IPs -// endpoints of the Digital Ocean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs -type ReservedIPsService interface { - List(context.Context, *ListOptions) ([]ReservedIP, *Response, error) - Get(context.Context, string) (*ReservedIP, *Response, error) - Create(context.Context, *ReservedIPCreateRequest) (*ReservedIP, *Response, error) - Delete(context.Context, string) (*Response, error) -} - -// ReservedIPsServiceOp handles communication with the reserved IPs related methods of the -// DigitalOcean API. -type ReservedIPsServiceOp struct { - client *Client -} - -var _ ReservedIPsService = &ReservedIPsServiceOp{} - -// ReservedIP represents a Digital Ocean reserved IP. -type ReservedIP struct { - Region *Region `json:"region"` - Droplet *Droplet `json:"droplet"` - IP string `json:"ip"` - ProjectID string `json:"project_id"` - Locked bool `json:"locked"` -} - -func (f ReservedIP) String() string { - return Stringify(f) -} - -// URN returns the reserved IP in a valid DO API URN form. 
-func (f ReservedIP) URN() string { - return ToURN(resourceType, f.IP) -} - -type reservedIPsRoot struct { - ReservedIPs []ReservedIP `json:"reserved_ips"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type reservedIPRoot struct { - ReservedIP *ReservedIP `json:"reserved_ip"` - Links *Links `json:"links,omitempty"` -} - -// ReservedIPCreateRequest represents a request to create a reserved IP. -// Specify DropletID to assign the reserved IP to a Droplet or Region -// to reserve it to the region. -type ReservedIPCreateRequest struct { - Region string `json:"region,omitempty"` - DropletID int `json:"droplet_id,omitempty"` - ProjectID string `json:"project_id,omitempty"` -} - -// List all reserved IPs. -func (r *ReservedIPsServiceOp) List(ctx context.Context, opt *ListOptions) ([]ReservedIP, *Response, error) { - path := reservedIPsBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := r.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(reservedIPsRoot) - resp, err := r.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.ReservedIPs, resp, err -} - -// Get an individual reserved IP. -func (r *ReservedIPsServiceOp) Get(ctx context.Context, ip string) (*ReservedIP, *Response, error) { - path := fmt.Sprintf("%s/%s", reservedIPsBasePath, ip) - - req, err := r.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(reservedIPRoot) - resp, err := r.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.ReservedIP, resp, err -} - -// Create a reserved IP. If the DropletID field of the request is not empty, -// the reserved IP will also be assigned to the droplet. 
-func (r *ReservedIPsServiceOp) Create(ctx context.Context, createRequest *ReservedIPCreateRequest) (*ReservedIP, *Response, error) { - path := reservedIPsBasePath - - req, err := r.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(reservedIPRoot) - resp, err := r.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.ReservedIP, resp, err -} - -// Delete a reserved IP. -func (r *ReservedIPsServiceOp) Delete(ctx context.Context, ip string) (*Response, error) { - path := fmt.Sprintf("%s/%s", reservedIPsBasePath, ip) - - req, err := r.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := r.client.Do(ctx, req, nil) - - return resp, err -} diff --git a/vendor/github.com/digitalocean/godo/reserved_ips_actions.go b/vendor/github.com/digitalocean/godo/reserved_ips_actions.go deleted file mode 100644 index 8a9e240..0000000 --- a/vendor/github.com/digitalocean/godo/reserved_ips_actions.go +++ /dev/null @@ -1,109 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -// ReservedIPActionsService is an interface for interfacing with the -// reserved IPs actions endpoints of the Digital Ocean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IP-Actions -type ReservedIPActionsService interface { - Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) - Unassign(ctx context.Context, ip string) (*Action, *Response, error) - Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) - List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) -} - -// ReservedIPActionsServiceOp handles communication with the reserved IPs -// action related methods of the DigitalOcean API. 
-type ReservedIPActionsServiceOp struct { - client *Client -} - -// Assign a reserved IP to a droplet. -func (s *ReservedIPActionsServiceOp) Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) { - request := &ActionRequest{ - "type": "assign", - "droplet_id": dropletID, - } - return s.doAction(ctx, ip, request) -} - -// Unassign a rerserved IP from the droplet it is currently assigned to. -func (s *ReservedIPActionsServiceOp) Unassign(ctx context.Context, ip string) (*Action, *Response, error) { - request := &ActionRequest{"type": "unassign"} - return s.doAction(ctx, ip, request) -} - -// Get an action for a particular reserved IP by id. -func (s *ReservedIPActionsServiceOp) Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) { - path := fmt.Sprintf("%s/%d", reservedIPActionPath(ip), actionID) - return s.get(ctx, path) -} - -// List the actions for a particular reserved IP. -func (s *ReservedIPActionsServiceOp) List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) { - path := reservedIPActionPath(ip) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.list(ctx, path) -} - -func (s *ReservedIPActionsServiceOp) doAction(ctx context.Context, ip string, request *ActionRequest) (*Action, *Response, error) { - path := reservedIPActionPath(ip) - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (s *ReservedIPActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - 
return root.Event, resp, err -} - -func (s *ReservedIPActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.Actions, resp, err -} - -func reservedIPActionPath(ip string) string { - return fmt.Sprintf("%s/%s/actions", reservedIPsBasePath, ip) -} diff --git a/vendor/github.com/digitalocean/godo/sizes.go b/vendor/github.com/digitalocean/godo/sizes.go deleted file mode 100644 index a3cb745..0000000 --- a/vendor/github.com/digitalocean/godo/sizes.go +++ /dev/null @@ -1,73 +0,0 @@ -package godo - -import ( - "context" - "net/http" -) - -// SizesService is an interface for interfacing with the size -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Sizes -type SizesService interface { - List(context.Context, *ListOptions) ([]Size, *Response, error) -} - -// SizesServiceOp handles communication with the size related methods of the -// DigitalOcean API. 
-type SizesServiceOp struct { - client *Client -} - -var _ SizesService = &SizesServiceOp{} - -// Size represents a DigitalOcean Size -type Size struct { - Slug string `json:"slug,omitempty"` - Memory int `json:"memory,omitempty"` - Vcpus int `json:"vcpus,omitempty"` - Disk int `json:"disk,omitempty"` - PriceMonthly float64 `json:"price_monthly,omitempty"` - PriceHourly float64 `json:"price_hourly,omitempty"` - Regions []string `json:"regions,omitempty"` - Available bool `json:"available,omitempty"` - Transfer float64 `json:"transfer,omitempty"` - Description string `json:"description,omitempty"` -} - -func (s Size) String() string { - return Stringify(s) -} - -type sizesRoot struct { - Sizes []Size - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// List all images -func (s *SizesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Size, *Response, error) { - path := "v2/sizes" - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(sizesRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Sizes, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/snapshots.go b/vendor/github.com/digitalocean/godo/snapshots.go deleted file mode 100644 index 13a06ca..0000000 --- a/vendor/github.com/digitalocean/godo/snapshots.go +++ /dev/null @@ -1,142 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const snapshotBasePath = "v2/snapshots" - -// SnapshotsService is an interface for interfacing with the snapshots -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Snapshots -type SnapshotsService interface { - List(context.Context, *ListOptions) ([]Snapshot, 
*Response, error) - ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error) - ListDroplet(context.Context, *ListOptions) ([]Snapshot, *Response, error) - Get(context.Context, string) (*Snapshot, *Response, error) - Delete(context.Context, string) (*Response, error) -} - -// SnapshotsServiceOp handles communication with the snapshot related methods of the -// DigitalOcean API. -type SnapshotsServiceOp struct { - client *Client -} - -var _ SnapshotsService = &SnapshotsServiceOp{} - -// Snapshot represents a DigitalOcean Snapshot -type Snapshot struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - ResourceID string `json:"resource_id,omitempty"` - ResourceType string `json:"resource_type,omitempty"` - Regions []string `json:"regions,omitempty"` - MinDiskSize int `json:"min_disk_size,omitempty"` - SizeGigaBytes float64 `json:"size_gigabytes,omitempty"` - Created string `json:"created_at,omitempty"` - Tags []string `json:"tags,omitempty"` -} - -type snapshotRoot struct { - Snapshot *Snapshot `json:"snapshot"` -} - -type snapshotsRoot struct { - Snapshots []Snapshot `json:"snapshots"` - Links *Links `json:"links,omitempty"` - Meta *Meta `json:"meta,omitempty"` -} - -type listSnapshotOptions struct { - ResourceType string `url:"resource_type,omitempty"` -} - -func (s Snapshot) String() string { - return Stringify(s) -} - -// List lists all the snapshots available. -func (s *SnapshotsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) { - return s.list(ctx, opt, nil) -} - -// ListDroplet lists all the Droplet snapshots. -func (s *SnapshotsServiceOp) ListDroplet(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) { - listOpt := listSnapshotOptions{ResourceType: "droplet"} - return s.list(ctx, opt, &listOpt) -} - -// ListVolume lists all the volume snapshots. 
-func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) { - listOpt := listSnapshotOptions{ResourceType: "volume"} - return s.list(ctx, opt, &listOpt) -} - -// Get retrieves a snapshot by id. -func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) { - return s.get(ctx, snapshotID) -} - -// Delete an snapshot. -func (s *SnapshotsServiceOp) Delete(ctx context.Context, snapshotID string) (*Response, error) { - path := fmt.Sprintf("%s/%s", snapshotBasePath, snapshotID) - - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// Helper method for getting an individual snapshot -func (s *SnapshotsServiceOp) get(ctx context.Context, ID string) (*Snapshot, *Response, error) { - path := fmt.Sprintf("%s/%s", snapshotBasePath, ID) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(snapshotRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Snapshot, resp, err -} - -// Helper method for listing snapshots -func (s *SnapshotsServiceOp) list(ctx context.Context, opt *ListOptions, listOpt *listSnapshotOptions) ([]Snapshot, *Response, error) { - path := snapshotBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - path, err = addOptions(path, listOpt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(snapshotsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - - return root.Snapshots, resp, err -} diff --git a/vendor/github.com/digitalocean/godo/storage.go 
b/vendor/github.com/digitalocean/godo/storage.go deleted file mode 100644 index 7700ffa..0000000 --- a/vendor/github.com/digitalocean/godo/storage.go +++ /dev/null @@ -1,262 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "time" -) - -const ( - storageBasePath = "v2" - storageAllocPath = storageBasePath + "/volumes" - storageSnapPath = storageBasePath + "/snapshots" -) - -// StorageService is an interface for interfacing with the storage -// endpoints of the Digital Ocean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Block-Storage -type StorageService interface { - ListVolumes(context.Context, *ListVolumeParams) ([]Volume, *Response, error) - GetVolume(context.Context, string) (*Volume, *Response, error) - CreateVolume(context.Context, *VolumeCreateRequest) (*Volume, *Response, error) - DeleteVolume(context.Context, string) (*Response, error) - ListSnapshots(ctx context.Context, volumeID string, opts *ListOptions) ([]Snapshot, *Response, error) - GetSnapshot(context.Context, string) (*Snapshot, *Response, error) - CreateSnapshot(context.Context, *SnapshotCreateRequest) (*Snapshot, *Response, error) - DeleteSnapshot(context.Context, string) (*Response, error) -} - -// StorageServiceOp handles communication with the storage volumes related methods of the -// DigitalOcean API. -type StorageServiceOp struct { - client *Client -} - -// ListVolumeParams stores the options you can set for a ListVolumeCall -type ListVolumeParams struct { - Region string `json:"region"` - Name string `json:"name"` - ListOptions *ListOptions `json:"list_options,omitempty"` -} - -var _ StorageService = &StorageServiceOp{} - -// Volume represents a Digital Ocean block store volume. 
-type Volume struct { - ID string `json:"id"` - Region *Region `json:"region"` - Name string `json:"name"` - SizeGigaBytes int64 `json:"size_gigabytes"` - Description string `json:"description"` - DropletIDs []int `json:"droplet_ids"` - CreatedAt time.Time `json:"created_at"` - FilesystemType string `json:"filesystem_type"` - FilesystemLabel string `json:"filesystem_label"` - Tags []string `json:"tags"` -} - -func (f Volume) String() string { - return Stringify(f) -} - -// URN returns the volume ID as a valid DO API URN -func (f Volume) URN() string { - return ToURN("Volume", f.ID) -} - -type storageVolumesRoot struct { - Volumes []Volume `json:"volumes"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type storageVolumeRoot struct { - Volume *Volume `json:"volume"` - Links *Links `json:"links,omitempty"` -} - -// VolumeCreateRequest represents a request to create a block store -// volume. -type VolumeCreateRequest struct { - Region string `json:"region"` - Name string `json:"name"` - Description string `json:"description"` - SizeGigaBytes int64 `json:"size_gigabytes"` - SnapshotID string `json:"snapshot_id"` - FilesystemType string `json:"filesystem_type"` - FilesystemLabel string `json:"filesystem_label"` - Tags []string `json:"tags"` -} - -// ListVolumes lists all storage volumes. 
-func (svc *StorageServiceOp) ListVolumes(ctx context.Context, params *ListVolumeParams) ([]Volume, *Response, error) { - path := storageAllocPath - if params != nil { - if params.Region != "" && params.Name != "" { - path = fmt.Sprintf("%s?name=%s®ion=%s", path, params.Name, params.Region) - } else if params.Region != "" { - path = fmt.Sprintf("%s?region=%s", path, params.Region) - } else if params.Name != "" { - path = fmt.Sprintf("%s?name=%s", path, params.Name) - } - - if params.ListOptions != nil { - var err error - path, err = addOptions(path, params.ListOptions) - if err != nil { - return nil, nil, err - } - } - } - - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(storageVolumesRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Volumes, resp, nil -} - -// CreateVolume creates a storage volume. The name must be unique. -func (svc *StorageServiceOp) CreateVolume(ctx context.Context, createRequest *VolumeCreateRequest) (*Volume, *Response, error) { - path := storageAllocPath - - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(storageVolumeRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Volume, resp, nil -} - -// GetVolume retrieves an individual storage volume. 
-func (svc *StorageServiceOp) GetVolume(ctx context.Context, id string) (*Volume, *Response, error) { - path := fmt.Sprintf("%s/%s", storageAllocPath, id) - - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(storageVolumeRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Volume, resp, nil -} - -// DeleteVolume deletes a storage volume. -func (svc *StorageServiceOp) DeleteVolume(ctx context.Context, id string) (*Response, error) { - path := fmt.Sprintf("%s/%s", storageAllocPath, id) - - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - return svc.client.Do(ctx, req, nil) -} - -// SnapshotCreateRequest represents a request to create a block store -// volume. -type SnapshotCreateRequest struct { - VolumeID string `json:"volume_id"` - Name string `json:"name"` - Description string `json:"description"` - Tags []string `json:"tags"` -} - -// ListSnapshots lists all snapshots related to a storage volume. -func (svc *StorageServiceOp) ListSnapshots(ctx context.Context, volumeID string, opt *ListOptions) ([]Snapshot, *Response, error) { - path := fmt.Sprintf("%s/%s/snapshots", storageAllocPath, volumeID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(snapshotsRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Snapshots, resp, nil -} - -// CreateSnapshot creates a snapshot of a storage volume. 
-func (svc *StorageServiceOp) CreateSnapshot(ctx context.Context, createRequest *SnapshotCreateRequest) (*Snapshot, *Response, error) { - path := fmt.Sprintf("%s/%s/snapshots", storageAllocPath, createRequest.VolumeID) - - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(snapshotRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.Snapshot, resp, nil -} - -// GetSnapshot retrieves an individual snapshot. -func (svc *StorageServiceOp) GetSnapshot(ctx context.Context, id string) (*Snapshot, *Response, error) { - path := fmt.Sprintf("%s/%s", storageSnapPath, id) - - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(snapshotRoot) - resp, err := svc.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Snapshot, resp, nil -} - -// DeleteSnapshot deletes a snapshot. -func (svc *StorageServiceOp) DeleteSnapshot(ctx context.Context, id string) (*Response, error) { - path := fmt.Sprintf("%s/%s", storageSnapPath, id) - - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - return svc.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/digitalocean/godo/storage_actions.go b/vendor/github.com/digitalocean/godo/storage_actions.go deleted file mode 100644 index 49e30cf..0000000 --- a/vendor/github.com/digitalocean/godo/storage_actions.go +++ /dev/null @@ -1,132 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -// StorageActionsService is an interface for interfacing with the -// storage actions endpoints of the Digital Ocean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Block-Storage-Actions -type StorageActionsService interface { - Attach(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) - DetachByDropletID(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) - Get(ctx context.Context, volumeID string, actionID int) (*Action, *Response, error) - List(ctx context.Context, volumeID string, opt *ListOptions) ([]Action, *Response, error) - Resize(ctx context.Context, volumeID string, sizeGigabytes int, regionSlug string) (*Action, *Response, error) -} - -// StorageActionsServiceOp handles communication with the storage volumes -// action related methods of the DigitalOcean API. -type StorageActionsServiceOp struct { - client *Client -} - -// StorageAttachment represents the attachment of a block storage -// volume to a specific Droplet under the device name. -type StorageAttachment struct { - DropletID int `json:"droplet_id"` -} - -// Attach a storage volume to a Droplet. -func (s *StorageActionsServiceOp) Attach(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) { - request := &ActionRequest{ - "type": "attach", - "droplet_id": dropletID, - } - return s.doAction(ctx, volumeID, request) -} - -// DetachByDropletID a storage volume from a Droplet by Droplet ID. -func (s *StorageActionsServiceOp) DetachByDropletID(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) { - request := &ActionRequest{ - "type": "detach", - "droplet_id": dropletID, - } - return s.doAction(ctx, volumeID, request) -} - -// Get an action for a particular storage volume by id. -func (s *StorageActionsServiceOp) Get(ctx context.Context, volumeID string, actionID int) (*Action, *Response, error) { - path := fmt.Sprintf("%s/%d", storageAllocationActionPath(volumeID), actionID) - return s.get(ctx, path) -} - -// List the actions for a particular storage volume. 
-func (s *StorageActionsServiceOp) List(ctx context.Context, volumeID string, opt *ListOptions) ([]Action, *Response, error) { - path := storageAllocationActionPath(volumeID) - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - - return s.list(ctx, path) -} - -// Resize a storage volume. -func (s *StorageActionsServiceOp) Resize(ctx context.Context, volumeID string, sizeGigabytes int, regionSlug string) (*Action, *Response, error) { - request := &ActionRequest{ - "type": "resize", - "size_gigabytes": sizeGigabytes, - "region": regionSlug, - } - return s.doAction(ctx, volumeID, request) -} - -func (s *StorageActionsServiceOp) doAction(ctx context.Context, volumeID string, request *ActionRequest) (*Action, *Response, error) { - path := storageAllocationActionPath(volumeID) - - req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (s *StorageActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Event, resp, err -} - -func (s *StorageActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) { - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(actionsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Actions, resp, err -} - -func storageAllocationActionPath(volumeID string) string { - 
return fmt.Sprintf("%s/%s/actions", storageAllocPath, volumeID) -} diff --git a/vendor/github.com/digitalocean/godo/strings.go b/vendor/github.com/digitalocean/godo/strings.go deleted file mode 100644 index f92893e..0000000 --- a/vendor/github.com/digitalocean/godo/strings.go +++ /dev/null @@ -1,104 +0,0 @@ -package godo - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -var timestampType = reflect.TypeOf(Timestamp{}) - -// ResourceWithURN is an interface for interfacing with the types -// that implement the URN method. -type ResourceWithURN interface { - URN() string -} - -// ToURN converts the resource type and ID to a valid DO API URN. -func ToURN(resourceType string, id interface{}) string { - return fmt.Sprintf("%s:%s:%v", "do", strings.ToLower(resourceType), id) -} - -// Stringify attempts to create a string representation of DigitalOcean types -func Stringify(message interface{}) string { - var buf bytes.Buffer - v := reflect.ValueOf(message) - stringifyValue(&buf, v) - return buf.String() -} - -// stringifyValue was graciously cargoculted from the goprotubuf library -func stringifyValue(w io.Writer, val reflect.Value) { - if val.Kind() == reflect.Ptr && val.IsNil() { - _, _ = w.Write([]byte("")) - return - } - - v := reflect.Indirect(val) - - switch v.Kind() { - case reflect.String: - fmt.Fprintf(w, `"%s"`, v) - case reflect.Slice: - stringifySlice(w, v) - return - case reflect.Struct: - stringifyStruct(w, v) - default: - if v.CanInterface() { - fmt.Fprint(w, v.Interface()) - } - } -} - -func stringifySlice(w io.Writer, v reflect.Value) { - _, _ = w.Write([]byte{'['}) - for i := 0; i < v.Len(); i++ { - if i > 0 { - _, _ = w.Write([]byte{' '}) - } - - stringifyValue(w, v.Index(i)) - } - - _, _ = w.Write([]byte{']'}) -} - -func stringifyStruct(w io.Writer, v reflect.Value) { - if v.Type().Name() != "" { - _, _ = w.Write([]byte(v.Type().String())) - } - - // special handling of Timestamp values - if v.Type() == timestampType { - fmt.Fprintf(w, 
"{%s}", v.Interface()) - return - } - - _, _ = w.Write([]byte{'{'}) - - var sep bool - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - if fv.Kind() == reflect.Ptr && fv.IsNil() { - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - continue - } - - if sep { - _, _ = w.Write([]byte(", ")) - } else { - sep = true - } - - _, _ = w.Write([]byte(v.Type().Field(i).Name)) - _, _ = w.Write([]byte{':'}) - stringifyValue(w, fv) - } - - _, _ = w.Write([]byte{'}'}) -} diff --git a/vendor/github.com/digitalocean/godo/tags.go b/vendor/github.com/digitalocean/godo/tags.go deleted file mode 100644 index a19a4b0..0000000 --- a/vendor/github.com/digitalocean/godo/tags.go +++ /dev/null @@ -1,247 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" -) - -const tagsBasePath = "v2/tags" - -// TagsService is an interface for interfacing with the tags -// endpoints of the DigitalOcean API -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Tags -type TagsService interface { - List(context.Context, *ListOptions) ([]Tag, *Response, error) - Get(context.Context, string) (*Tag, *Response, error) - Create(context.Context, *TagCreateRequest) (*Tag, *Response, error) - Delete(context.Context, string) (*Response, error) - - TagResources(context.Context, string, *TagResourcesRequest) (*Response, error) - UntagResources(context.Context, string, *UntagResourcesRequest) (*Response, error) -} - -// TagsServiceOp handles communication with tag related method of the -// DigitalOcean API. -type TagsServiceOp struct { - client *Client -} - -var _ TagsService = &TagsServiceOp{} - -// ResourceType represents a class of resource, currently only droplet are supported -type ResourceType string - -const ( - // DropletResourceType holds the string representing our ResourceType of Droplet. - DropletResourceType ResourceType = "droplet" - // ImageResourceType holds the string representing our ResourceType of Image. 
- ImageResourceType ResourceType = "image" - // VolumeResourceType holds the string representing our ResourceType of Volume. - VolumeResourceType ResourceType = "volume" - // LoadBalancerResourceType holds the string representing our ResourceType of LoadBalancer. - LoadBalancerResourceType ResourceType = "load_balancer" - // VolumeSnapshotResourceType holds the string representing our ResourceType for storage Snapshots. - VolumeSnapshotResourceType ResourceType = "volume_snapshot" - // DatabaseResourceType holds the string representing our ResourceType of Database. - DatabaseResourceType ResourceType = "database" -) - -// Resource represent a single resource for associating/disassociating with tags -type Resource struct { - ID string `json:"resource_id,omitempty"` - Type ResourceType `json:"resource_type,omitempty"` -} - -// TaggedResources represent the set of resources a tag is attached to -type TaggedResources struct { - Count int `json:"count"` - LastTaggedURI string `json:"last_tagged_uri,omitempty"` - Droplets *TaggedDropletsResources `json:"droplets,omitempty"` - Images *TaggedImagesResources `json:"images"` - Volumes *TaggedVolumesResources `json:"volumes"` - VolumeSnapshots *TaggedVolumeSnapshotsResources `json:"volume_snapshots"` - Databases *TaggedDatabasesResources `json:"databases"` -} - -// TaggedDropletsResources represent the droplet resources a tag is attached to -type TaggedDropletsResources struct { - Count int `json:"count,float64,omitempty"` - LastTagged *Droplet `json:"last_tagged,omitempty"` - LastTaggedURI string `json:"last_tagged_uri,omitempty"` -} - -// TaggedResourcesData represent the generic resources a tag is attached to -type TaggedResourcesData struct { - Count int `json:"count,float64,omitempty"` - LastTaggedURI string `json:"last_tagged_uri,omitempty"` -} - -// TaggedImagesResources represent the image resources a tag is attached to -type TaggedImagesResources TaggedResourcesData - -// TaggedVolumesResources represent the volume 
resources a tag is attached to -type TaggedVolumesResources TaggedResourcesData - -// TaggedVolumeSnapshotsResources represent the volume snapshot resources a tag is attached to -type TaggedVolumeSnapshotsResources TaggedResourcesData - -// TaggedDatabasesResources represent the database resources a tag is attached to -type TaggedDatabasesResources TaggedResourcesData - -// Tag represent DigitalOcean tag -type Tag struct { - Name string `json:"name,omitempty"` - Resources *TaggedResources `json:"resources,omitempty"` -} - -// TagCreateRequest represents the JSON structure of a request of that type. -type TagCreateRequest struct { - Name string `json:"name"` -} - -// TagResourcesRequest represents the JSON structure of a request of that type. -type TagResourcesRequest struct { - Resources []Resource `json:"resources"` -} - -// UntagResourcesRequest represents the JSON structure of a request of that type. -type UntagResourcesRequest struct { - Resources []Resource `json:"resources"` -} - -type tagsRoot struct { - Tags []Tag `json:"tags"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type tagRoot struct { - Tag *Tag `json:"tag"` -} - -// List all tags -func (s *TagsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Tag, *Response, error) { - path := tagsBasePath - path, err := addOptions(path, opt) - - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(tagsRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Tags, resp, err -} - -// Get a single tag -func (s *TagsServiceOp) Get(ctx context.Context, name string) (*Tag, *Response, error) { - path := fmt.Sprintf("%s/%s", tagsBasePath, name) - - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) - if err 
!= nil { - return nil, nil, err - } - - root := new(tagRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Tag, resp, err -} - -// Create a new tag -func (s *TagsServiceOp) Create(ctx context.Context, createRequest *TagCreateRequest) (*Tag, *Response, error) { - if createRequest == nil { - return nil, nil, NewArgError("createRequest", "cannot be nil") - } - - req, err := s.client.NewRequest(ctx, http.MethodPost, tagsBasePath, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(tagRoot) - resp, err := s.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Tag, resp, err -} - -// Delete an existing tag -func (s *TagsServiceOp) Delete(ctx context.Context, name string) (*Response, error) { - if name == "" { - return nil, NewArgError("name", "cannot be empty") - } - - path := fmt.Sprintf("%s/%s", tagsBasePath, name) - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// TagResources associates resources with a given Tag. -func (s *TagsServiceOp) TagResources(ctx context.Context, name string, tagRequest *TagResourcesRequest) (*Response, error) { - if name == "" { - return nil, NewArgError("name", "cannot be empty") - } - - if tagRequest == nil { - return nil, NewArgError("tagRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%s/resources", tagsBasePath, name) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, tagRequest) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} - -// UntagResources dissociates resources with a given Tag. 
-func (s *TagsServiceOp) UntagResources(ctx context.Context, name string, untagRequest *UntagResourcesRequest) (*Response, error) { - if name == "" { - return nil, NewArgError("name", "cannot be empty") - } - - if untagRequest == nil { - return nil, NewArgError("tagRequest", "cannot be nil") - } - - path := fmt.Sprintf("%s/%s/resources", tagsBasePath, name) - req, err := s.client.NewRequest(ctx, http.MethodDelete, path, untagRequest) - if err != nil { - return nil, err - } - - resp, err := s.client.Do(ctx, req, nil) - - return resp, err -} diff --git a/vendor/github.com/digitalocean/godo/timestamp.go b/vendor/github.com/digitalocean/godo/timestamp.go deleted file mode 100644 index 37a28e5..0000000 --- a/vendor/github.com/digitalocean/godo/timestamp.go +++ /dev/null @@ -1,35 +0,0 @@ -package godo - -import ( - "strconv" - "time" -) - -// Timestamp represents a time that can be unmarshalled from a JSON string -// formatted as either an RFC3339 or Unix timestamp. All -// exported methods of time.Time can be called on Timestamp. -type Timestamp struct { - time.Time -} - -func (t Timestamp) String() string { - return t.Time.String() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Time is expected in RFC3339 or Unix format. 
-func (t *Timestamp) UnmarshalJSON(data []byte) error { - str := string(data) - i, err := strconv.ParseInt(str, 10, 64) - if err == nil { - t.Time = time.Unix(i, 0) - } else { - t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str) - } - return err -} - -// Equal reports whether t and u are equal based on time.Equal -func (t Timestamp) Equal(u Timestamp) bool { - return t.Time.Equal(u.Time) -} diff --git a/vendor/github.com/digitalocean/godo/uptime.go b/vendor/github.com/digitalocean/godo/uptime.go deleted file mode 100644 index f312e0e..0000000 --- a/vendor/github.com/digitalocean/godo/uptime.go +++ /dev/null @@ -1,351 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "path" -) - -const ( - uptimeChecksBasePath = "/v2/uptime/checks" - // UptimeAlertGreaterThan is the comparison > - UptimeAlertGreaterThan UptimeAlertComp = "greater_than" - // UptimeAlertLessThan is the comparison < - UptimeAlertLessThan UptimeAlertComp = "less_than" -) - -// UptimeChecksService is an interface for creating and managing Uptime checks with the DigitalOcean API. 
-// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Uptime -type UptimeChecksService interface { - List(context.Context, *ListOptions) ([]UptimeCheck, *Response, error) - Get(context.Context, string) (*UptimeCheck, *Response, error) - GetState(context.Context, string) (*UptimeCheckState, *Response, error) - Create(context.Context, *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error) - Update(context.Context, string, *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error) - Delete(context.Context, string) (*Response, error) - GetAlert(context.Context, string, string) (*UptimeAlert, *Response, error) - ListAlerts(context.Context, string, *ListOptions) ([]UptimeAlert, *Response, error) - CreateAlert(context.Context, string, *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error) - UpdateAlert(context.Context, string, string, *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error) - DeleteAlert(context.Context, string, string) (*Response, error) -} - -// UptimeChecksServiceOp handles communication with Uptime Check methods of the DigitalOcean API. -type UptimeChecksServiceOp struct { - client *Client -} - -// UptimeCheck represents a DigitalOcean UptimeCheck configuration. -type UptimeCheck struct { - ID string `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - Target string `json:"target"` - Regions []string `json:"regions"` - Enabled bool `json:"enabled"` -} - -// UptimeAlert represents a DigitalOcean Uptime Alert configuration. -type UptimeAlert struct { - ID string `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - Threshold int `json:"threshold"` - Comparison UptimeAlertComp `json:"comparison"` - Notifications *Notifications `json:"notifications"` - Period string `json:"period"` -} - -// Notifications represents a DigitalOcean Notifications configuration. 
-type Notifications struct { - Email []string `json:"email"` - Slack []SlackDetails `json:"slack"` -} - -// UptimeCheckState represents a DigitalOcean Uptime Check's state configuration. -type UptimeCheckState struct { - Regions map[string]UptimeRegion `json:"regions"` - PreviousOutage UptimePreviousOutage `json:"previous_outage"` -} - -type UptimeRegion struct { - Status string `json:"status"` - StatusChangedAt string `json:"status_changed_at"` - ThirtyDayUptimePercentage float32 `json:"thirty_day_uptime_percentage"` -} - -// UptimePreviousOutage represents a DigitalOcean Uptime Check's previous outage configuration. -type UptimePreviousOutage struct { - Region string `json:"region"` - StartedAt string `json:"started_at"` - EndedAt string `json:"ended_at"` - DurationSeconds int `json:"duration_seconds"` -} - -// CreateUptimeCheckRequest represents the request to create a new uptime check. -type CreateUptimeCheckRequest struct { - Name string `json:"name"` - Type string `json:"type"` - Target string `json:"target"` - Regions []string `json:"regions"` - Enabled bool `json:"enabled"` -} - -// UpdateUptimeCheckRequest represents the request to update uptime check information. -type UpdateUptimeCheckRequest struct { - Name string `json:"name"` - Type string `json:"type"` - Target string `json:"target"` - Regions []string `json:"regions"` - Enabled bool `json:"enabled"` -} - -// CreateUptimeUptimeAlertRequest represents the request to create a new Uptime Alert. -type CreateUptimeAlertRequest struct { - Name string `json:"name"` - Type string `json:"type"` - Threshold int `json:"threshold"` - Comparison UptimeAlertComp `json:"comparison"` - Notifications *Notifications `json:"notifications"` - Period string `json:"period"` -} - -// UpdateUptimeAlertRequest represents the request to update an alert. 
-type UpdateUptimeAlertRequest struct { - Name string `json:"name"` - Type string `json:"type"` - Threshold int `json:"threshold"` - Comparison UptimeAlertComp `json:"comparison"` - Notifications *Notifications `json:"notifications"` - Period string `json:"period"` -} - -// UptimeAlertComp represents an uptime alert comparison operation -type UptimeAlertComp string - -type uptimeChecksRoot struct { - UptimeChecks []UptimeCheck `json:"checks"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type uptimeCheckStateRoot struct { - UptimeCheckState UptimeCheckState `json:"state"` -} - -type uptimeAlertsRoot struct { - UptimeAlerts []UptimeAlert `json:"alerts"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type uptimeCheckRoot struct { - UptimeCheck *UptimeCheck `json:"check"` -} - -type uptimeAlertRoot struct { - UptimeAlert *UptimeAlert `json:"alert"` -} - -var _ UptimeChecksService = &UptimeChecksServiceOp{} - -// List Checks. -func (p *UptimeChecksServiceOp) List(ctx context.Context, opts *ListOptions) ([]UptimeCheck, *Response, error) { - path, err := addOptions(uptimeChecksBasePath, opts) - if err != nil { - return nil, nil, err - } - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(uptimeChecksRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.UptimeChecks, resp, err -} - -// GetState of uptime check. 
-func (p *UptimeChecksServiceOp) GetState(ctx context.Context, uptimeCheckID string) (*UptimeCheckState, *Response, error) { - path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/state") - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(uptimeCheckStateRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return &root.UptimeCheckState, resp, err -} - -// Get retrieves a single uptime check by its ID. -func (p *UptimeChecksServiceOp) Get(ctx context.Context, uptimeCheckID string) (*UptimeCheck, *Response, error) { - path := path.Join(uptimeChecksBasePath, uptimeCheckID) - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(uptimeCheckRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.UptimeCheck, resp, err -} - -// Create a new uptime check. -func (p *UptimeChecksServiceOp) Create(ctx context.Context, cr *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error) { - req, err := p.client.NewRequest(ctx, http.MethodPost, uptimeChecksBasePath, cr) - if err != nil { - return nil, nil, err - } - - root := new(uptimeCheckRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.UptimeCheck, resp, err -} - -// Update an uptime check. -func (p *UptimeChecksServiceOp) Update(ctx context.Context, uptimeCheckID string, ur *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error) { - path := path.Join(uptimeChecksBasePath, uptimeCheckID) - req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur) - if err != nil { - return nil, nil, err - } - - root := new(uptimeCheckRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.UptimeCheck, resp, err -} - -// Delete an existing uptime check. 
-func (p *UptimeChecksServiceOp) Delete(ctx context.Context, uptimeCheckID string) (*Response, error) { - path := path.Join(uptimeChecksBasePath, uptimeCheckID) - req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - return p.client.Do(ctx, req, nil) -} - -// alerts - -// ListAlerts lists alerts for a check. -func (p *UptimeChecksServiceOp) ListAlerts(ctx context.Context, uptimeCheckID string, opts *ListOptions) ([]UptimeAlert, *Response, error) { - fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts") - path, err := addOptions(fullPath, opts) - if err != nil { - return nil, nil, err - } - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(uptimeAlertsRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.UptimeAlerts, resp, err -} - -// CreateAlert creates a new check alert. -func (p *UptimeChecksServiceOp) CreateAlert(ctx context.Context, uptimeCheckID string, cr *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error) { - fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts") - req, err := p.client.NewRequest(ctx, http.MethodPost, fullPath, cr) - if err != nil { - return nil, nil, err - } - - root := new(uptimeAlertRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.UptimeAlert, resp, err -} - -// GetAlert retrieves a single uptime check alert by its ID. 
-func (p *UptimeChecksServiceOp) GetAlert(ctx context.Context, uptimeCheckID string, alertID string) (*UptimeAlert, *Response, error) { - path := fmt.Sprintf("v2/uptime/checks/%s/alerts/%s", uptimeCheckID, alertID) - - req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(uptimeAlertRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.UptimeAlert, resp, err -} - -// UpdateAlert updates an check's alert. -func (p *UptimeChecksServiceOp) UpdateAlert(ctx context.Context, uptimeCheckID string, alertID string, ur *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error) { - path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID) - req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur) - if err != nil { - return nil, nil, err - } - - root := new(uptimeAlertRoot) - resp, err := p.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.UptimeAlert, resp, err -} - -// DeleteAlert deletes an existing check's alert. 
-func (p *UptimeChecksServiceOp) DeleteAlert(ctx context.Context, uptimeCheckID string, alertID string) (*Response, error) { - path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID) - req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - return p.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/digitalocean/godo/vpc_peerings.go b/vendor/github.com/digitalocean/godo/vpc_peerings.go deleted file mode 100644 index e6dfc04..0000000 --- a/vendor/github.com/digitalocean/godo/vpc_peerings.go +++ /dev/null @@ -1,199 +0,0 @@ -package godo - -import ( - "context" - "net/http" - "time" -) - -const vpcPeeringsPath = "/v2/vpc_peerings" - -type vpcPeeringRoot struct { - VPCPeering *VPCPeering `json:"vpc_peering"` -} - -type vpcPeeringsRoot struct { - VPCPeerings []*VPCPeering `json:"vpc_peerings"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// VPCPeering represents a DigitalOcean Virtual Private Cloud Peering configuration. -type VPCPeering struct { - // ID is the generated ID of the VPC Peering - ID string `json:"id"` - // Name is the name of the VPC Peering - Name string `json:"name"` - // VPCIDs is the IDs of the pair of VPCs between which a peering is created - VPCIDs []string `json:"vpc_ids"` - // CreatedAt is time when this VPC Peering was first created - CreatedAt time.Time `json:"created_at"` - // Status is the status of the VPC Peering - Status string `json:"status"` -} - -// VPCPeeringCreateRequest represents a request to create a Virtual Private Cloud Peering -// for a list of associated VPC IDs. -type VPCPeeringCreateRequest struct { - // Name is the name of the VPC Peering - Name string `json:"name"` - // VPCIDs is the IDs of the pair of VPCs between which a peering is created - VPCIDs []string `json:"vpc_ids"` -} - -// VPCPeeringUpdateRequest represents a request to update a Virtual Private Cloud Peering. 
-type VPCPeeringUpdateRequest struct { - // Name is the name of the VPC Peering - Name string `json:"name"` -} - -// VPCPeeringCreateRequestByVPCID represents a request to create a Virtual Private Cloud Peering -// for an associated VPC ID. -type VPCPeeringCreateRequestByVPCID struct { - // Name is the name of the VPC Peering - Name string `json:"name"` - // VPCID is the ID of one of the VPCs with which the peering has to be created - VPCID string `json:"vpc_id"` -} - -// CreateVPCPeering creates a new Virtual Private Cloud Peering. -func (v *VPCsServiceOp) CreateVPCPeering(ctx context.Context, create *VPCPeeringCreateRequest) (*VPCPeering, *Response, error) { - path := vpcPeeringsPath - req, err := v.client.NewRequest(ctx, http.MethodPost, path, create) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.VPCPeering, resp, nil -} - -// GetVPCPeering retrieves a Virtual Private Cloud Peering. -func (v *VPCsServiceOp) GetVPCPeering(ctx context.Context, id string) (*VPCPeering, *Response, error) { - path := vpcPeeringsPath + "/" + id - req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.VPCPeering, resp, nil -} - -// ListVPCPeerings lists all Virtual Private Cloud Peerings. 
-func (v *VPCsServiceOp) ListVPCPeerings(ctx context.Context, opt *ListOptions) ([]*VPCPeering, *Response, error) { - path, err := addOptions(vpcPeeringsPath, opt) - if err != nil { - return nil, nil, err - } - req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringsRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - return root.VPCPeerings, resp, nil -} - -// UpdateVPCPeering updates a Virtual Private Cloud Peering. -func (v *VPCsServiceOp) UpdateVPCPeering(ctx context.Context, id string, update *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) { - path := vpcPeeringsPath + "/" + id - req, err := v.client.NewRequest(ctx, http.MethodPatch, path, update) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.VPCPeering, resp, nil -} - -// DeleteVPCPeering deletes a Virtual Private Cloud Peering. -func (v *VPCsServiceOp) DeleteVPCPeering(ctx context.Context, id string) (*Response, error) { - path := vpcPeeringsPath + "/" + id - req, err := v.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := v.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil -} - -// CreateVPCPeeringByVPCID creates a new Virtual Private Cloud Peering for requested VPC ID. 
-func (v *VPCsServiceOp) CreateVPCPeeringByVPCID(ctx context.Context, id string, create *VPCPeeringCreateRequestByVPCID) (*VPCPeering, *Response, error) { - path := vpcsBasePath + "/" + id + "/peerings" - req, err := v.client.NewRequest(ctx, http.MethodPost, path, create) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.VPCPeering, resp, nil -} - -// ListVPCPeeringsByVPCID lists all Virtual Private Cloud Peerings for requested VPC ID. -func (v *VPCsServiceOp) ListVPCPeeringsByVPCID(ctx context.Context, id string, opt *ListOptions) ([]*VPCPeering, *Response, error) { - path, err := addOptions(vpcsBasePath+"/"+id+"/peerings", opt) - req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringsRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - return root.VPCPeerings, resp, nil -} - -// UpdateVPCPeeringByVPCID updates a Virtual Private Cloud Peering for requested VPC ID. 
-func (v *VPCsServiceOp) UpdateVPCPeeringByVPCID(ctx context.Context, vpcID, peerID string, update *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) { - path := vpcsBasePath + "/" + vpcID + "/peerings" + "/" + peerID - req, err := v.client.NewRequest(ctx, http.MethodPatch, path, update) - if err != nil { - return nil, nil, err - } - - root := new(vpcPeeringRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - return root.VPCPeering, resp, nil -} diff --git a/vendor/github.com/digitalocean/godo/vpcs.go b/vendor/github.com/digitalocean/godo/vpcs.go deleted file mode 100644 index 6752519..0000000 --- a/vendor/github.com/digitalocean/godo/vpcs.go +++ /dev/null @@ -1,273 +0,0 @@ -package godo - -import ( - "context" - "net/http" - "time" -) - -const vpcsBasePath = "/v2/vpcs" - -// VPCsService is an interface for managing Virtual Private Cloud configurations with the -// DigitalOcean API. -// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/VPCs -type VPCsService interface { - Create(context.Context, *VPCCreateRequest) (*VPC, *Response, error) - Get(context.Context, string) (*VPC, *Response, error) - List(context.Context, *ListOptions) ([]*VPC, *Response, error) - ListMembers(context.Context, string, *VPCListMembersRequest, *ListOptions) ([]*VPCMember, *Response, error) - Update(context.Context, string, *VPCUpdateRequest) (*VPC, *Response, error) - Set(context.Context, string, ...VPCSetField) (*VPC, *Response, error) - Delete(context.Context, string) (*Response, error) - CreateVPCPeering(context.Context, *VPCPeeringCreateRequest) (*VPCPeering, *Response, error) - GetVPCPeering(context.Context, string) (*VPCPeering, *Response, error) - ListVPCPeerings(context.Context, *ListOptions) ([]*VPCPeering, *Response, error) - UpdateVPCPeering(context.Context, string, *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) - DeleteVPCPeering(context.Context, string) (*Response, error) - 
CreateVPCPeeringByVPCID(context.Context, string, *VPCPeeringCreateRequestByVPCID) (*VPCPeering, *Response, error) - ListVPCPeeringsByVPCID(context.Context, string, *ListOptions) ([]*VPCPeering, *Response, error) - UpdateVPCPeeringByVPCID(context.Context, string, string, *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) -} - -var _ VPCsService = &VPCsServiceOp{} - -// VPCsServiceOp interfaces with VPC endpoints in the DigitalOcean API. -type VPCsServiceOp struct { - client *Client -} - -// VPCCreateRequest represents a request to create a Virtual Private Cloud. -type VPCCreateRequest struct { - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - Description string `json:"description,omitempty"` - IPRange string `json:"ip_range,omitempty"` -} - -// VPCUpdateRequest represents a request to update a Virtual Private Cloud. -type VPCUpdateRequest struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Default *bool `json:"default,omitempty"` -} - -// VPCSetField allows one to set individual fields within a VPC configuration. -type VPCSetField interface { - vpcSetField(map[string]interface{}) -} - -// VPCSetName is used when one want to set the `name` field of a VPC. -// Ex.: VPCs.Set(..., VPCSetName("new-name")) -type VPCSetName string - -// VPCSetDescription is used when one want to set the `description` field of a VPC. -// Ex.: VPCs.Set(..., VPCSetDescription("vpc description")) -type VPCSetDescription string - -// VPCSetDefault is used when one wants to enable the `default` field of a VPC, to -// set a VPC as the default one in the region -// Ex.: VPCs.Set(..., VPCSetDefault()) -func VPCSetDefault() VPCSetField { - return &vpcSetDefault{} -} - -// vpcSetDefault satisfies the VPCSetField interface -type vpcSetDefault struct{} - -// VPC represents a DigitalOcean Virtual Private Cloud configuration. 
-type VPC struct { - ID string `json:"id,omitempty"` - URN string `json:"urn"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - IPRange string `json:"ip_range,omitempty"` - RegionSlug string `json:"region,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - Default bool `json:"default,omitempty"` -} - -type VPCListMembersRequest struct { - ResourceType string `url:"resource_type,omitempty"` -} - -type VPCMember struct { - URN string `json:"urn,omitempty"` - Name string `json:"name,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` -} - -type vpcRoot struct { - VPC *VPC `json:"vpc"` -} - -type vpcsRoot struct { - VPCs []*VPC `json:"vpcs"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type vpcMembersRoot struct { - Members []*VPCMember `json:"members"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// Get returns the details of a Virtual Private Cloud. -func (v *VPCsServiceOp) Get(ctx context.Context, id string) (*VPC, *Response, error) { - path := vpcsBasePath + "/" + id - req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(vpcRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.VPC, resp, nil -} - -// Create creates a new Virtual Private Cloud. -func (v *VPCsServiceOp) Create(ctx context.Context, create *VPCCreateRequest) (*VPC, *Response, error) { - path := vpcsBasePath - req, err := v.client.NewRequest(ctx, http.MethodPost, path, create) - if err != nil { - return nil, nil, err - } - - root := new(vpcRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.VPC, resp, nil -} - -// List returns a list of the caller's VPCs, with optional pagination. 
-func (v *VPCsServiceOp) List(ctx context.Context, opt *ListOptions) ([]*VPC, *Response, error) { - path, err := addOptions(vpcsBasePath, opt) - if err != nil { - return nil, nil, err - } - req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(vpcsRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.VPCs, resp, nil -} - -// Update updates a Virtual Private Cloud's properties. -func (v *VPCsServiceOp) Update(ctx context.Context, id string, update *VPCUpdateRequest) (*VPC, *Response, error) { - path := vpcsBasePath + "/" + id - req, err := v.client.NewRequest(ctx, http.MethodPut, path, update) - if err != nil { - return nil, nil, err - } - - root := new(vpcRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.VPC, resp, nil -} - -func (n VPCSetName) vpcSetField(in map[string]interface{}) { - in["name"] = n -} - -func (n VPCSetDescription) vpcSetField(in map[string]interface{}) { - in["description"] = n -} - -func (*vpcSetDefault) vpcSetField(in map[string]interface{}) { - in["default"] = true -} - -// Set updates specific properties of a Virtual Private Cloud. -func (v *VPCsServiceOp) Set(ctx context.Context, id string, fields ...VPCSetField) (*VPC, *Response, error) { - path := vpcsBasePath + "/" + id - update := make(map[string]interface{}, len(fields)) - for _, field := range fields { - field.vpcSetField(update) - } - - req, err := v.client.NewRequest(ctx, http.MethodPatch, path, update) - if err != nil { - return nil, nil, err - } - - root := new(vpcRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.VPC, resp, nil -} - -// Delete deletes a Virtual Private Cloud. 
There is no way to recover a VPC once it has been -// destroyed. -func (v *VPCsServiceOp) Delete(ctx context.Context, id string) (*Response, error) { - path := vpcsBasePath + "/" + id - req, err := v.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := v.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - - return resp, nil -} - -func (v *VPCsServiceOp) ListMembers(ctx context.Context, id string, request *VPCListMembersRequest, opt *ListOptions) ([]*VPCMember, *Response, error) { - path := vpcsBasePath + "/" + id + "/members" - pathWithResourceType, err := addOptions(path, request) - if err != nil { - return nil, nil, err - } - pathWithOpts, err := addOptions(pathWithResourceType, opt) - if err != nil { - return nil, nil, err - } - - req, err := v.client.NewRequest(ctx, http.MethodGet, pathWithOpts, nil) - if err != nil { - return nil, nil, err - } - - root := new(vpcMembersRoot) - resp, err := v.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Members, resp, nil - -} diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/github.com/golang/mock/AUTHORS deleted file mode 100644 index 660b8cc..0000000 --- a/vendor/github.com/golang/mock/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -# This is the official list of GoMock authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Alex Reece -Google Inc. 
diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS deleted file mode 100644 index def849c..0000000 --- a/vendor/github.com/golang/mock/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute (and typically -# have contributed) code to the gomock repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name -# -# An entry with two email addresses specifies that the -# first address should be used in the submit logs and -# that the second address should be recognized as the -# same person when interacting with Rietveld. - -# Please keep the list sorted. - -Aaron Jacobs -Alex Reece -David Symonds -Ryan Barrett diff --git a/vendor/github.com/golang/mock/LICENSE b/vendor/github.com/golang/mock/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/golang/mock/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/mock/gomock/call.go b/vendor/github.com/golang/mock/gomock/call.go deleted file mode 100644 index 13c9f44..0000000 --- a/vendor/github.com/golang/mock/gomock/call.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright 2010 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package gomock - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -// Call represents an expected call to a mock. -type Call struct { - t TestHelper // for triggering test failures on invalid call setup - - receiver interface{} // the receiver of the method call - method string // the name of the method - methodType reflect.Type // the type of the method - args []Matcher // the args - origin string // file and line number of call setup - - preReqs []*Call // prerequisite calls - - // Expectations - minCalls, maxCalls int - - numCalls int // actual number made - - // actions are called when this Call is called. Each action gets the args and - // can set the return values by returning a non-nil slice. Actions run in the - // order they are created. - actions []func([]interface{}) []interface{} -} - -// newCall creates a *Call. It requires the method type in order to support -// unexported methods. -func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { - t.Helper() - - // TODO: check arity, types. - mArgs := make([]Matcher, len(args)) - for i, arg := range args { - if m, ok := arg.(Matcher); ok { - mArgs[i] = m - } else if arg == nil { - // Handle nil specially so that passing a nil interface value - // will match the typed nils of concrete args. - mArgs[i] = Nil() - } else { - mArgs[i] = Eq(arg) - } - } - - // callerInfo's skip should be updated if the number of calls between the user's test - // and this line changes, i.e. this code is wrapped in another anonymous function. - // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. - origin := callerInfo(3) - actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { - // Synthesize the zero value for each of the return args' types. 
- rets := make([]interface{}, methodType.NumOut()) - for i := 0; i < methodType.NumOut(); i++ { - rets[i] = reflect.Zero(methodType.Out(i)).Interface() - } - return rets - }} - return &Call{t: t, receiver: receiver, method: method, methodType: methodType, - args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} -} - -// AnyTimes allows the expectation to be called 0 or more times -func (c *Call) AnyTimes() *Call { - c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity - return c -} - -// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes -// was previously called with 1, MinTimes also sets the maximum number of calls to infinity. -func (c *Call) MinTimes(n int) *Call { - c.minCalls = n - if c.maxCalls == 1 { - c.maxCalls = 1e8 - } - return c -} - -// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was -// previously called with 1, MaxTimes also sets the minimum number of calls to 0. -func (c *Call) MaxTimes(n int) *Call { - c.maxCalls = n - if c.minCalls == 1 { - c.minCalls = 0 - } - return c -} - -// DoAndReturn declares the action to run when the call is matched. -// The return values from this function are returned by the mocked function. -// It takes an interface{} argument to support n-arity functions. -func (c *Call) DoAndReturn(f interface{}) *Call { - // TODO: Check arity and types here, rather than dying badly elsewhere. 
- v := reflect.ValueOf(f) - - c.addAction(func(args []interface{}) []interface{} { - c.t.Helper() - vArgs := make([]reflect.Value, len(args)) - ft := v.Type() - if c.methodType.NumIn() != ft.NumIn() { - c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) - return nil - } - for i := 0; i < len(args); i++ { - if args[i] != nil { - vArgs[i] = reflect.ValueOf(args[i]) - } else { - // Use the zero value for the arg. - vArgs[i] = reflect.Zero(ft.In(i)) - } - } - vRets := v.Call(vArgs) - rets := make([]interface{}, len(vRets)) - for i, ret := range vRets { - rets[i] = ret.Interface() - } - return rets - }) - return c -} - -// Do declares the action to run when the call is matched. The function's -// return values are ignored to retain backward compatibility. To use the -// return values call DoAndReturn. -// It takes an interface{} argument to support n-arity functions. -func (c *Call) Do(f interface{}) *Call { - // TODO: Check arity and types here, rather than dying badly elsewhere. - v := reflect.ValueOf(f) - - c.addAction(func(args []interface{}) []interface{} { - c.t.Helper() - if c.methodType.NumIn() != v.Type().NumIn() { - c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin) - return nil - } - vArgs := make([]reflect.Value, len(args)) - ft := v.Type() - for i := 0; i < len(args); i++ { - if args[i] != nil { - vArgs[i] = reflect.ValueOf(args[i]) - } else { - // Use the zero value for the arg. - vArgs[i] = reflect.Zero(ft.In(i)) - } - } - v.Call(vArgs) - return nil - }) - return c -} - -// Return declares the values to be returned by the mocked function call. 
-func (c *Call) Return(rets ...interface{}) *Call { - c.t.Helper() - - mt := c.methodType - if len(rets) != mt.NumOut() { - c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, len(rets), mt.NumOut(), c.origin) - } - for i, ret := range rets { - if got, want := reflect.TypeOf(ret), mt.Out(i); got == want { - // Identical types; nothing to do. - } else if got == nil { - // Nil needs special handling. - switch want.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - // ok - default: - c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]", - i, c.receiver, c.method, want, c.origin) - } - } else if got.AssignableTo(want) { - // Assignable type relation. Make the assignment now so that the generated code - // can return the values with a type assertion. - v := reflect.New(want).Elem() - v.Set(reflect.ValueOf(ret)) - rets[i] = v.Interface() - } else { - c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]", - i, c.receiver, c.method, got, want, c.origin) - } - } - - c.addAction(func([]interface{}) []interface{} { - return rets - }) - - return c -} - -// Times declares the exact number of times a function call is expected to be executed. -func (c *Call) Times(n int) *Call { - c.minCalls, c.maxCalls = n, n - return c -} - -// SetArg declares an action that will set the nth argument's value, -// indirected through a pointer. Or, in the case of a slice, SetArg -// will copy value's elements into the nth argument. -func (c *Call) SetArg(n int, value interface{}) *Call { - c.t.Helper() - - mt := c.methodType - // TODO: This will break on variadic methods. - // We will need to check those at invocation time. - if n < 0 || n >= mt.NumIn() { - c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]", - n, mt.NumIn(), c.origin) - } - // Permit setting argument through an interface. 
- // In the interface case, we don't (nay, can't) check the type here. - at := mt.In(n) - switch at.Kind() { - case reflect.Ptr: - dt := at.Elem() - if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) { - c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]", - n, vt, dt, c.origin) - } - case reflect.Interface: - // nothing to do - case reflect.Slice: - // nothing to do - default: - c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]", - n, at, c.origin) - } - - c.addAction(func(args []interface{}) []interface{} { - v := reflect.ValueOf(value) - switch reflect.TypeOf(args[n]).Kind() { - case reflect.Slice: - setSlice(args[n], v) - default: - reflect.ValueOf(args[n]).Elem().Set(v) - } - return nil - }) - return c -} - -// isPreReq returns true if other is a direct or indirect prerequisite to c. -func (c *Call) isPreReq(other *Call) bool { - for _, preReq := range c.preReqs { - if other == preReq || preReq.isPreReq(other) { - return true - } - } - return false -} - -// After declares that the call may only match after preReq has been exhausted. -func (c *Call) After(preReq *Call) *Call { - c.t.Helper() - - if c == preReq { - c.t.Fatalf("A call isn't allowed to be its own prerequisite") - } - if preReq.isPreReq(c) { - c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq) - } - - c.preReqs = append(c.preReqs, preReq) - return c -} - -// Returns true if the minimum number of calls have been made. -func (c *Call) satisfied() bool { - return c.numCalls >= c.minCalls -} - -// Returns true if the maximum number of calls have been made. 
-func (c *Call) exhausted() bool { - return c.numCalls >= c.maxCalls -} - -func (c *Call) String() string { - args := make([]string, len(c.args)) - for i, arg := range c.args { - args[i] = arg.String() - } - arguments := strings.Join(args, ", ") - return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin) -} - -// Tests if the given call matches the expected call. -// If yes, returns nil. If no, returns error with message explaining why it does not match. -func (c *Call) matches(args []interface{}) error { - if !c.methodType.IsVariadic() { - if len(args) != len(c.args) { - return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", - c.origin, len(args), len(c.args)) - } - - for i, m := range c.args { - if !m.Matches(args[i]) { - return fmt.Errorf( - "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", - c.origin, i, formatGottenArg(m, args[i]), m, - ) - } - } - } else { - if len(c.args) < c.methodType.NumIn()-1 { - return fmt.Errorf("expected call at %s has the wrong number of matchers. Got: %d, want: %d", - c.origin, len(c.args), c.methodType.NumIn()-1) - } - if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) { - return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", - c.origin, len(args), len(c.args)) - } - if len(args) < len(c.args)-1 { - return fmt.Errorf("expected call at %s has the wrong number of arguments. 
Got: %d, want: greater than or equal to %d", - c.origin, len(args), len(c.args)-1) - } - - for i, m := range c.args { - if i < c.methodType.NumIn()-1 { - // Non-variadic args - if !m.Matches(args[i]) { - return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) - } - continue - } - // The last arg has a possibility of a variadic argument, so let it branch - - // sample: Foo(a int, b int, c ...int) - if i < len(c.args) && i < len(args) { - if m.Matches(args[i]) { - // Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any()) - // Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher) - // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC) - // Got Foo(a, b) want Foo(matcherA, matcherB) - // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD) - continue - } - } - - // The number of actual args don't match the number of matchers, - // or the last matcher is a slice and the last arg is not. - // If this function still matches it is because the last matcher - // matches all the remaining arguments or the lack of any. - // Convert the remaining arguments, if any, into a slice of the - // expected type. - vArgsType := c.methodType.In(c.methodType.NumIn() - 1) - vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i) - for _, arg := range args[i:] { - vArgs = reflect.Append(vArgs, reflect.ValueOf(arg)) - } - if m.Matches(vArgs.Interface()) { - // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any()) - // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher) - // Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any()) - // Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher) - break - } - // Wrong number of matchers or not match. Fail. 
- // Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD) - // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD) - // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) - // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) - // Got Foo(a, b, c) want Foo(matcherA, matcherB) - - return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) - } - } - - // Check that all prerequisite calls have been satisfied. - for _, preReqCall := range c.preReqs { - if !preReqCall.satisfied() { - return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", - c.origin, preReqCall, c) - } - } - - // Check that the call is not exhausted. - if c.exhausted() { - return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin) - } - - return nil -} - -// dropPrereqs tells the expected Call to not re-check prerequisite calls any -// longer, and to return its current set. -func (c *Call) dropPrereqs() (preReqs []*Call) { - preReqs = c.preReqs - c.preReqs = nil - return -} - -func (c *Call) call() []func([]interface{}) []interface{} { - c.numCalls++ - return c.actions -} - -// InOrder declares that the given calls should occur in order. 
-func InOrder(calls ...*Call) { - for i := 1; i < len(calls); i++ { - calls[i].After(calls[i-1]) - } -} - -func setSlice(arg interface{}, v reflect.Value) { - va := reflect.ValueOf(arg) - for i := 0; i < v.Len(); i++ { - va.Index(i).Set(v.Index(i)) - } -} - -func (c *Call) addAction(action func([]interface{}) []interface{}) { - c.actions = append(c.actions, action) -} - -func formatGottenArg(m Matcher, arg interface{}) string { - got := fmt.Sprintf("%v (%T)", arg, arg) - if gs, ok := m.(GotFormatter); ok { - got = gs.Got(arg) - } - return got -} diff --git a/vendor/github.com/golang/mock/gomock/callset.go b/vendor/github.com/golang/mock/gomock/callset.go deleted file mode 100644 index 49dba78..0000000 --- a/vendor/github.com/golang/mock/gomock/callset.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2011 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gomock - -import ( - "bytes" - "errors" - "fmt" -) - -// callSet represents a set of expected calls, indexed by receiver and method -// name. -type callSet struct { - // Calls that are still expected. - expected map[callSetKey][]*Call - // Calls that have been exhausted. - exhausted map[callSetKey][]*Call -} - -// callSetKey is the key in the maps in callSet -type callSetKey struct { - receiver interface{} - fname string -} - -func newCallSet() *callSet { - return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)} -} - -// Add adds a new expected call. 
-func (cs callSet) Add(call *Call) { - key := callSetKey{call.receiver, call.method} - m := cs.expected - if call.exhausted() { - m = cs.exhausted - } - m[key] = append(m[key], call) -} - -// Remove removes an expected call. -func (cs callSet) Remove(call *Call) { - key := callSetKey{call.receiver, call.method} - calls := cs.expected[key] - for i, c := range calls { - if c == call { - // maintain order for remaining calls - cs.expected[key] = append(calls[:i], calls[i+1:]...) - cs.exhausted[key] = append(cs.exhausted[key], call) - break - } - } -} - -// FindMatch searches for a matching call. Returns error with explanation message if no call matched. -func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) { - key := callSetKey{receiver, method} - - // Search through the expected calls. - expected := cs.expected[key] - var callsErrors bytes.Buffer - for _, call := range expected { - err := call.matches(args) - if err != nil { - _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) - } else { - return call, nil - } - } - - // If we haven't found a match then search through the exhausted calls so we - // get useful error messages. - exhausted := cs.exhausted[key] - for _, call := range exhausted { - if err := call.matches(args); err != nil { - _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) - continue - } - _, _ = fmt.Fprintf( - &callsErrors, "all expected calls for method %q have been exhausted", method, - ) - } - - if len(expected)+len(exhausted) == 0 { - _, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method) - } - - return nil, errors.New(callsErrors.String()) -} - -// Failures returns the calls that are not satisfied. 
-func (cs callSet) Failures() []*Call { - failures := make([]*Call, 0, len(cs.expected)) - for _, calls := range cs.expected { - for _, call := range calls { - if !call.satisfied() { - failures = append(failures, call) - } - } - } - return failures -} diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/github.com/golang/mock/gomock/controller.go deleted file mode 100644 index f054200..0000000 --- a/vendor/github.com/golang/mock/gomock/controller.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2010 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gomock is a mock framework for Go. -// -// Standard usage: -// (1) Define an interface that you wish to mock. -// type MyInterface interface { -// SomeMethod(x int64, y string) -// } -// (2) Use mockgen to generate a mock from the interface. -// (3) Use the mock in a test: -// func TestMyThing(t *testing.T) { -// mockCtrl := gomock.NewController(t) -// defer mockCtrl.Finish() -// -// mockObj := something.NewMockMyInterface(mockCtrl) -// mockObj.EXPECT().SomeMethod(4, "blah") -// // pass mockObj to a real object and play with it. -// } -// -// By default, expected calls are not enforced to run in any particular order. -// Call order dependency can be enforced by use of InOrder and/or Call.After. -// Call.After can create more varied call order dependencies, but InOrder is -// often more convenient. -// -// The following examples create equivalent call order dependencies. 
-// -// Example of using Call.After to chain expected call order: -// -// firstCall := mockObj.EXPECT().SomeMethod(1, "first") -// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) -// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) -// -// Example of using InOrder to declare expected call order: -// -// gomock.InOrder( -// mockObj.EXPECT().SomeMethod(1, "first"), -// mockObj.EXPECT().SomeMethod(2, "second"), -// mockObj.EXPECT().SomeMethod(3, "third"), -// ) -package gomock - -import ( - "context" - "fmt" - "reflect" - "runtime" - "sync" -) - -// A TestReporter is something that can be used to report test failures. It -// is satisfied by the standard library's *testing.T. -type TestReporter interface { - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) -} - -// TestHelper is a TestReporter that has the Helper method. It is satisfied -// by the standard library's *testing.T. -type TestHelper interface { - TestReporter - Helper() -} - -// cleanuper is used to check if TestHelper also has the `Cleanup` method. A -// common pattern is to pass in a `*testing.T` to -// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup -// method. This can be utilized to call `Finish()` so the caller of this library -// does not have to. -type cleanuper interface { - Cleanup(func()) -} - -// A Controller represents the top-level control of a mock ecosystem. It -// defines the scope and lifetime of mock objects, as well as their -// expectations. It is safe to call Controller's methods from multiple -// goroutines. Each test should create a new Controller and invoke Finish via -// defer. -// -// func TestFoo(t *testing.T) { -// ctrl := gomock.NewController(t) -// defer ctrl.Finish() -// // .. -// } -// -// func TestBar(t *testing.T) { -// t.Run("Sub-Test-1", st) { -// ctrl := gomock.NewController(st) -// defer ctrl.Finish() -// // .. 
-// }) -// t.Run("Sub-Test-2", st) { -// ctrl := gomock.NewController(st) -// defer ctrl.Finish() -// // .. -// }) -// }) -type Controller struct { - // T should only be called within a generated mock. It is not intended to - // be used in user code and may be changed in future versions. T is the - // TestReporter passed in when creating the Controller via NewController. - // If the TestReporter does not implement a TestHelper it will be wrapped - // with a nopTestHelper. - T TestHelper - mu sync.Mutex - expectedCalls *callSet - finished bool -} - -// NewController returns a new Controller. It is the preferred way to create a -// Controller. -// -// New in go1.14+, if you are passing a *testing.T into this function you no -// longer need to call ctrl.Finish() in your test methods. -func NewController(t TestReporter) *Controller { - h, ok := t.(TestHelper) - if !ok { - h = &nopTestHelper{t} - } - ctrl := &Controller{ - T: h, - expectedCalls: newCallSet(), - } - if c, ok := isCleanuper(ctrl.T); ok { - c.Cleanup(func() { - ctrl.T.Helper() - ctrl.finish(true, nil) - }) - } - - return ctrl -} - -type cancelReporter struct { - t TestHelper - cancel func() -} - -func (r *cancelReporter) Errorf(format string, args ...interface{}) { - r.t.Errorf(format, args...) -} -func (r *cancelReporter) Fatalf(format string, args ...interface{}) { - defer r.cancel() - r.t.Fatalf(format, args...) -} - -func (r *cancelReporter) Helper() { - r.t.Helper() -} - -// WithContext returns a new Controller and a Context, which is cancelled on any -// fatal failure. -func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { - h, ok := t.(TestHelper) - if !ok { - h = &nopTestHelper{t: t} - } - - ctx, cancel := context.WithCancel(ctx) - return NewController(&cancelReporter{t: h, cancel: cancel}), ctx -} - -type nopTestHelper struct { - t TestReporter -} - -func (h *nopTestHelper) Errorf(format string, args ...interface{}) { - h.t.Errorf(format, args...) 
-} -func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { - h.t.Fatalf(format, args...) -} - -func (h nopTestHelper) Helper() {} - -// RecordCall is called by a mock. It should not be called by user code. -func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { - ctrl.T.Helper() - - recv := reflect.ValueOf(receiver) - for i := 0; i < recv.Type().NumMethod(); i++ { - if recv.Type().Method(i).Name == method { - return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...) - } - } - ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver) - panic("unreachable") -} - -// RecordCallWithMethodType is called by a mock. It should not be called by user code. -func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { - ctrl.T.Helper() - - call := newCall(ctrl.T, receiver, method, methodType, args...) - - ctrl.mu.Lock() - defer ctrl.mu.Unlock() - ctrl.expectedCalls.Add(call) - - return call -} - -// Call is called by a mock. It should not be called by user code. -func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { - ctrl.T.Helper() - - // Nest this code so we can use defer to make sure the lock is released. - actions := func() []func([]interface{}) []interface{} { - ctrl.T.Helper() - ctrl.mu.Lock() - defer ctrl.mu.Unlock() - - expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) - if err != nil { - // callerInfo's skip should be updated if the number of calls between the user's test - // and this line changes, i.e. this code is wrapped in another anonymous function. - // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. 
- origin := callerInfo(3) - ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) - } - - // Two things happen here: - // * the matching call no longer needs to check prerequite calls, - // * and the prerequite calls are no longer expected, so remove them. - preReqCalls := expected.dropPrereqs() - for _, preReqCall := range preReqCalls { - ctrl.expectedCalls.Remove(preReqCall) - } - - actions := expected.call() - if expected.exhausted() { - ctrl.expectedCalls.Remove(expected) - } - return actions - }() - - var rets []interface{} - for _, action := range actions { - if r := action(args); r != nil { - rets = r - } - } - - return rets -} - -// Finish checks to see if all the methods that were expected to be called -// were called. It should be invoked for each Controller. It is not idempotent -// and therefore can only be invoked once. -// -// New in go1.14+, if you are passing a *testing.T into NewController function you no -// longer need to call ctrl.Finish() in your test methods. -func (ctrl *Controller) Finish() { - // If we're currently panicking, probably because this is a deferred call. - // This must be recovered in the deferred function. - err := recover() - ctrl.finish(false, err) -} - -func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { - ctrl.T.Helper() - - ctrl.mu.Lock() - defer ctrl.mu.Unlock() - - if ctrl.finished { - if _, ok := isCleanuper(ctrl.T); !ok { - ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") - } - return - } - ctrl.finished = true - - // Short-circuit, pass through the panic. - if panicErr != nil { - panic(panicErr) - } - - // Check that all remaining expected calls are satisfied. 
- failures := ctrl.expectedCalls.Failures() - for _, call := range failures { - ctrl.T.Errorf("missing call(s) to %v", call) - } - if len(failures) != 0 { - if !cleanup { - ctrl.T.Fatalf("aborting test due to missing call(s)") - return - } - ctrl.T.Errorf("aborting test due to missing call(s)") - } -} - -// callerInfo returns the file:line of the call site. skip is the number -// of stack frames to skip when reporting. 0 is callerInfo's call site. -func callerInfo(skip int) string { - if _, file, line, ok := runtime.Caller(skip + 1); ok { - return fmt.Sprintf("%s:%d", file, line) - } - return "unknown file" -} - -// isCleanuper checks it if t's base TestReporter has a Cleanup method. -func isCleanuper(t TestReporter) (cleanuper, bool) { - tr := unwrapTestReporter(t) - c, ok := tr.(cleanuper) - return c, ok -} - -// unwrapTestReporter unwraps TestReporter to the base implementation. -func unwrapTestReporter(t TestReporter) TestReporter { - tr := t - switch nt := t.(type) { - case *cancelReporter: - tr = nt.t - if h, check := tr.(*nopTestHelper); check { - tr = h.t - } - case *nopTestHelper: - tr = nt.t - default: - // not wrapped - } - return tr -} diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/github.com/golang/mock/gomock/matchers.go deleted file mode 100644 index 2822fb2..0000000 --- a/vendor/github.com/golang/mock/gomock/matchers.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2010 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package gomock - -import ( - "fmt" - "reflect" - "strings" -) - -// A Matcher is a representation of a class of values. -// It is used to represent the valid or expected arguments to a mocked method. -type Matcher interface { - // Matches returns whether x is a match. - Matches(x interface{}) bool - - // String describes what the matcher matches. - String() string -} - -// WantFormatter modifies the given Matcher's String() method to the given -// Stringer. This allows for control on how the "Want" is formatted when -// printing . -func WantFormatter(s fmt.Stringer, m Matcher) Matcher { - type matcher interface { - Matches(x interface{}) bool - } - - return struct { - matcher - fmt.Stringer - }{ - matcher: m, - Stringer: s, - } -} - -// StringerFunc type is an adapter to allow the use of ordinary functions as -// a Stringer. If f is a function with the appropriate signature, -// StringerFunc(f) is a Stringer that calls f. -type StringerFunc func() string - -// String implements fmt.Stringer. -func (f StringerFunc) String() string { - return f() -} - -// GotFormatter is used to better print failure messages. If a matcher -// implements GotFormatter, it will use the result from Got when printing -// the failure message. -type GotFormatter interface { - // Got is invoked with the received value. The result is used when - // printing the failure message. - Got(got interface{}) string -} - -// GotFormatterFunc type is an adapter to allow the use of ordinary -// functions as a GotFormatter. If f is a function with the appropriate -// signature, GotFormatterFunc(f) is a GotFormatter that calls f. -type GotFormatterFunc func(got interface{}) string - -// Got implements GotFormatter. -func (f GotFormatterFunc) Got(got interface{}) string { - return f(got) -} - -// GotFormatterAdapter attaches a GotFormatter to a Matcher. 
-func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher { - return struct { - GotFormatter - Matcher - }{ - GotFormatter: s, - Matcher: m, - } -} - -type anyMatcher struct{} - -func (anyMatcher) Matches(interface{}) bool { - return true -} - -func (anyMatcher) String() string { - return "is anything" -} - -type eqMatcher struct { - x interface{} -} - -func (e eqMatcher) Matches(x interface{}) bool { - // In case, some value is nil - if e.x == nil || x == nil { - return reflect.DeepEqual(e.x, x) - } - - // Check if types assignable and convert them to common type - x1Val := reflect.ValueOf(e.x) - x2Val := reflect.ValueOf(x) - - if x1Val.Type().AssignableTo(x2Val.Type()) { - x1ValConverted := x1Val.Convert(x2Val.Type()) - return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) - } - - return false -} - -func (e eqMatcher) String() string { - return fmt.Sprintf("is equal to %v (%T)", e.x, e.x) -} - -type nilMatcher struct{} - -func (nilMatcher) Matches(x interface{}) bool { - if x == nil { - return true - } - - v := reflect.ValueOf(x) - switch v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice: - return v.IsNil() - } - - return false -} - -func (nilMatcher) String() string { - return "is nil" -} - -type notMatcher struct { - m Matcher -} - -func (n notMatcher) Matches(x interface{}) bool { - return !n.m.Matches(x) -} - -func (n notMatcher) String() string { - return "not(" + n.m.String() + ")" -} - -type assignableToTypeOfMatcher struct { - targetType reflect.Type -} - -func (m assignableToTypeOfMatcher) Matches(x interface{}) bool { - return reflect.TypeOf(x).AssignableTo(m.targetType) -} - -func (m assignableToTypeOfMatcher) String() string { - return "is assignable to " + m.targetType.Name() -} - -type allMatcher struct { - matchers []Matcher -} - -func (am allMatcher) Matches(x interface{}) bool { - for _, m := range am.matchers { - if !m.Matches(x) { - return false - } - } - return true 
-} - -func (am allMatcher) String() string { - ss := make([]string, 0, len(am.matchers)) - for _, matcher := range am.matchers { - ss = append(ss, matcher.String()) - } - return strings.Join(ss, "; ") -} - -type lenMatcher struct { - i int -} - -func (m lenMatcher) Matches(x interface{}) bool { - v := reflect.ValueOf(x) - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == m.i - default: - return false - } -} - -func (m lenMatcher) String() string { - return fmt.Sprintf("has length %d", m.i) -} - -type inAnyOrderMatcher struct { - x interface{} -} - -func (m inAnyOrderMatcher) Matches(x interface{}) bool { - given, ok := m.prepareValue(x) - if !ok { - return false - } - wanted, ok := m.prepareValue(m.x) - if !ok { - return false - } - - if given.Len() != wanted.Len() { - return false - } - - usedFromGiven := make([]bool, given.Len()) - foundFromWanted := make([]bool, wanted.Len()) - for i := 0; i < wanted.Len(); i++ { - wantedMatcher := Eq(wanted.Index(i).Interface()) - for j := 0; j < given.Len(); j++ { - if usedFromGiven[j] { - continue - } - if wantedMatcher.Matches(given.Index(j).Interface()) { - foundFromWanted[i] = true - usedFromGiven[j] = true - break - } - } - } - - missingFromWanted := 0 - for _, found := range foundFromWanted { - if !found { - missingFromWanted++ - } - } - extraInGiven := 0 - for _, used := range usedFromGiven { - if !used { - extraInGiven++ - } - } - - return extraInGiven == 0 && missingFromWanted == 0 -} - -func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) { - xValue := reflect.ValueOf(x) - switch xValue.Kind() { - case reflect.Slice, reflect.Array: - return xValue, true - default: - return reflect.Value{}, false - } -} - -func (m inAnyOrderMatcher) String() string { - return fmt.Sprintf("has the same elements as %v", m.x) -} - -// Constructors - -// All returns a composite Matcher that returns true if and only all of the -// matchers return 
true. -func All(ms ...Matcher) Matcher { return allMatcher{ms} } - -// Any returns a matcher that always matches. -func Any() Matcher { return anyMatcher{} } - -// Eq returns a matcher that matches on equality. -// -// Example usage: -// Eq(5).Matches(5) // returns true -// Eq(5).Matches(4) // returns false -func Eq(x interface{}) Matcher { return eqMatcher{x} } - -// Len returns a matcher that matches on length. This matcher returns false if -// is compared to a type that is not an array, chan, map, slice, or string. -func Len(i int) Matcher { - return lenMatcher{i} -} - -// Nil returns a matcher that matches if the received value is nil. -// -// Example usage: -// var x *bytes.Buffer -// Nil().Matches(x) // returns true -// x = &bytes.Buffer{} -// Nil().Matches(x) // returns false -func Nil() Matcher { return nilMatcher{} } - -// Not reverses the results of its given child matcher. -// -// Example usage: -// Not(Eq(5)).Matches(4) // returns true -// Not(Eq(5)).Matches(5) // returns false -func Not(x interface{}) Matcher { - if m, ok := x.(Matcher); ok { - return notMatcher{m} - } - return notMatcher{Eq(x)} -} - -// AssignableToTypeOf is a Matcher that matches if the parameter to the mock -// function is assignable to the type of the parameter to this function. -// -// Example usage: -// var s fmt.Stringer = &bytes.Buffer{} -// AssignableToTypeOf(s).Matches(time.Second) // returns true -// AssignableToTypeOf(s).Matches(99) // returns false -// -// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() -// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true -func AssignableToTypeOf(x interface{}) Matcher { - if xt, ok := x.(reflect.Type); ok { - return assignableToTypeOfMatcher{xt} - } - return assignableToTypeOfMatcher{reflect.TypeOf(x)} -} - -// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order. 
-// -// Example usage: -// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true -// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false -func InAnyOrder(x interface{}) Matcher { - return inAnyOrderMatcher{x} -} diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE deleted file mode 100644 index ae121a1..0000000 --- a/vendor/github.com/google/go-querystring/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Google. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go deleted file mode 100644 index 91198f8..0000000 --- a/vendor/github.com/google/go-querystring/query/encode.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package query implements encoding of structs into URL query parameters. -// -// As a simple example: -// -// type Options struct { -// Query string `url:"q"` -// ShowAll bool `url:"all"` -// Page int `url:"page"` -// } -// -// opt := Options{ "foo", true, 2 } -// v, _ := query.Values(opt) -// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" -// -// The exact mapping between Go values and url.Values is described in the -// documentation for the Values() function. -package query - -import ( - "bytes" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var timeType = reflect.TypeOf(time.Time{}) - -var encoderType = reflect.TypeOf(new(Encoder)).Elem() - -// Encoder is an interface implemented by any type that wishes to encode -// itself into URL values in a non-standard way. -type Encoder interface { - EncodeValues(key string, v *url.Values) error -} - -// Values returns the url.Values encoding of v. -// -// Values expects to be passed a struct, and traverses it recursively using the -// following encoding rules. -// -// Each exported struct field is encoded as a URL parameter unless -// -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option -// -// The empty values are false, 0, any nil pointer or interface value, any array -// slice, map, or string of length zero, and any type (such as time.Time) that -// returns true for IsZero(). -// -// The URL parameter name defaults to the struct field name but can be -// specified in the struct field's tag value. 
The "url" key in the struct -// field's tag value is the key name, followed by an optional comma and -// options. For example: -// -// // Field is ignored by this package. -// Field int `url:"-"` -// -// // Field appears as URL parameter "myName". -// Field int `url:"myName"` -// -// // Field appears as URL parameter "myName" and the field is omitted if -// // its value is empty -// Field int `url:"myName,omitempty"` -// -// // Field appears as URL parameter "Field" (the default), but the field -// // is skipped if empty. Note the leading comma. -// Field int `url:",omitempty"` -// -// For encoding individual field values, the following type-dependent rules -// apply: -// -// Boolean values default to encoding as the strings "true" or "false". -// Including the "int" option signals that the field should be encoded as the -// strings "1" or "0". -// -// time.Time values default to encoding as RFC3339 timestamps. Including the -// "unix" option signals that the field should be encoded as a Unix time (see -// time.Unix()). The "unixmilli" and "unixnano" options will encode the number -// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see -// time.UnixNano()). Including the "layout" struct tag (separate from the -// "url" tag) will use the value of the "layout" tag as a layout passed to -// time.Format. For example: -// -// // Encode a time.Time as YYYY-MM-DD -// Field time.Time `layout:"2006-01-02"` -// -// Slice and Array values default to encoding as multiple URL values of the -// same name. Including the "comma" option signals that the field should be -// encoded as a single comma-delimited value. Including the "space" option -// similarly encodes the value as a single space-delimited string. Including -// the "semicolon" option will encode the value as a semicolon-delimited string. -// Including the "brackets" option signals that the multiple URL values should -// have "[]" appended to the value name. 
"numbered" will append a number to -// the end of each incidence of the value name, example: -// name0=value0&name1=value1, etc. Including the "del" struct tag (separate -// from the "url" tag) will use the value of the "del" tag as the delimiter. -// For example: -// -// // Encode a slice of bools as ints ("1" for true, "0" for false), -// // separated by exclamation points "!". -// Field []bool `url:",int" del:"!"` -// -// Anonymous struct fields are usually encoded as if their inner exported -// fields were fields in the outer struct, subject to the standard Go -// visibility rules. An anonymous struct field with a name given in its URL -// tag is treated as having that name, rather than being anonymous. -// -// Non-nil pointer values are encoded as the value pointed to. -// -// Nested structs are encoded including parent fields in value names for -// scoping. e.g: -// -// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" -// -// All other values are encoded using their default string representation. -// -// Multiple fields that encode to the same URL parameter name will be included -// as multiple URL values of the same name. -func Values(v interface{}) (url.Values, error) { - values := make(url.Values) - val := reflect.ValueOf(v) - for val.Kind() == reflect.Ptr { - if val.IsNil() { - return values, nil - } - val = val.Elem() - } - - if v == nil { - return values, nil - } - - if val.Kind() != reflect.Struct { - return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) - } - - err := reflectValue(values, val, "") - return values, err -} - -// reflectValue populates the values parameter from the struct fields in val. -// Embedded structs are followed recursively (using the rules defined in the -// Values function documentation) breadth-first. 
-func reflectValue(values url.Values, val reflect.Value, scope string) error { - var embedded []reflect.Value - - typ := val.Type() - for i := 0; i < typ.NumField(); i++ { - sf := typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - - sv := val.Field(i) - tag := sf.Tag.Get("url") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - - if name == "" { - if sf.Anonymous { - v := reflect.Indirect(sv) - if v.IsValid() && v.Kind() == reflect.Struct { - // save embedded struct for later processing - embedded = append(embedded, v) - continue - } - } - - name = sf.Name - } - - if scope != "" { - name = scope + "[" + name + "]" - } - - if opts.Contains("omitempty") && isEmptyValue(sv) { - continue - } - - if sv.Type().Implements(encoderType) { - // if sv is a nil pointer and the custom encoder is defined on a non-pointer - // method receiver, set sv to the zero value of the underlying type - if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) { - sv = reflect.New(sv.Type().Elem()) - } - - m := sv.Interface().(Encoder) - if err := m.EncodeValues(name, &values); err != nil { - return err - } - continue - } - - // recursively dereference pointers. 
break on nil pointers - for sv.Kind() == reflect.Ptr { - if sv.IsNil() { - break - } - sv = sv.Elem() - } - - if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { - var del string - if opts.Contains("comma") { - del = "," - } else if opts.Contains("space") { - del = " " - } else if opts.Contains("semicolon") { - del = ";" - } else if opts.Contains("brackets") { - name = name + "[]" - } else { - del = sf.Tag.Get("del") - } - - if del != "" { - s := new(bytes.Buffer) - first := true - for i := 0; i < sv.Len(); i++ { - if first { - first = false - } else { - s.WriteString(del) - } - s.WriteString(valueString(sv.Index(i), opts, sf)) - } - values.Add(name, s.String()) - } else { - for i := 0; i < sv.Len(); i++ { - k := name - if opts.Contains("numbered") { - k = fmt.Sprintf("%s%d", name, i) - } - values.Add(k, valueString(sv.Index(i), opts, sf)) - } - } - continue - } - - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts, sf)) - continue - } - - if sv.Kind() == reflect.Struct { - if err := reflectValue(values, sv, name); err != nil { - return err - } - continue - } - - values.Add(name, valueString(sv, opts, sf)) - } - - for _, f := range embedded { - if err := reflectValue(values, f, scope); err != nil { - return err - } - } - - return nil -} - -// valueString returns the string representation of a value. 
-func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return "" - } - v = v.Elem() - } - - if v.Kind() == reflect.Bool && opts.Contains("int") { - if v.Bool() { - return "1" - } - return "0" - } - - if v.Type() == timeType { - t := v.Interface().(time.Time) - if opts.Contains("unix") { - return strconv.FormatInt(t.Unix(), 10) - } - if opts.Contains("unixmilli") { - return strconv.FormatInt((t.UnixNano() / 1e6), 10) - } - if opts.Contains("unixnano") { - return strconv.FormatInt(t.UnixNano(), 10) - } - if layout := sf.Tag.Get("layout"); layout != "" { - return t.Format(layout) - } - return t.Format(time.RFC3339) - } - - return fmt.Sprint(v.Interface()) -} - -// isEmptyValue checks if a value should be considered empty for the purposes -// of omitting fields with the "omitempty" option. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - type zeroable interface { - IsZero() bool - } - - if z, ok := v.Interface().(zeroable); ok { - return z.IsZero() - } - - return false -} - -// tagOptions is the string following a comma in a struct field's "url" tag, or -// the empty string. It does not include the leading comma. -type tagOptions []string - -// parseTag splits a struct field's url tag into its name and comma-separated -// options. 
-func parseTag(tag string) (string, tagOptions) { - s := strings.Split(tag, ",") - return s[0], s[1:] -} - -// Contains checks whether the tagOptions contains the specified option. -func (o tagOptions) Contains(option string) bool { - for _, s := range o { - if s == option { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. 
"Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. 
Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. 
Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e531..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). 
- -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index fe28d15..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,58 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "runtime" - "time" -) - -// DefaultTransport returns a new http.Transport with similar default values to -// http.DefaultTransport, but with idle connections and keepalives disabled. -func DefaultTransport() *http.Transport { - transport := DefaultPooledTransport() - transport.DisableKeepAlives = true - transport.MaxIdleConnsPerHost = -1 - return transport -} - -// DefaultPooledTransport returns a new http.Transport with similar default -// values to http.DefaultTransport. Do not use this for transient transports as -// it can leak file descriptors over time. Only use this for transports that -// will be re-used for the same host(s). 
-func DefaultPooledTransport() *http.Transport { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - ForceAttemptHTTP2: true, - MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, - } - return transport -} - -// DefaultClient returns a new http.Client with similar default values to -// http.Client, but with a non-shared Transport, idle connections disabled, and -// keepalives disabled. -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} - -// DefaultPooledClient returns a new http.Client with similar default values to -// http.Client, but with a shared Transport. Do not use this function for -// transient clients as it can leak file descriptors over time. Only use this -// for clients that will be re-used for the same host(s). -func DefaultPooledClient() *http.Client { - return &http.Client{ - Transport: DefaultPooledTransport(), - } -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go deleted file mode 100644 index 0584109..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package cleanhttp offers convenience utilities for acquiring "clean" -// http.Transport and http.Client structs. -// -// Values set on http.DefaultClient and http.DefaultTransport affect all -// callers. This can have detrimental effects, esepcially in TLS contexts, -// where client or root certificates set to talk to multiple endpoints can end -// up displacing each other, leading to hard-to-debug issues. 
This package -// provides non-shared http.Client and http.Transport structs to ensure that -// the configuration will not be overwritten by other parts of the application -// or dependencies. -// -// The DefaultClient and DefaultTransport functions disable idle connections -// and keepalives. Without ensuring that idle connections are closed before -// garbage collection, short-term clients/transports can leak file descriptors, -// eventually leading to "too many open files" errors. If you will be -// connecting to the same hosts repeatedly from the same client, you can use -// DefaultPooledClient to receive a client that has connection pooling -// semantics similar to http.DefaultClient. -// -package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go deleted file mode 100644 index 3c845dc..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go +++ /dev/null @@ -1,48 +0,0 @@ -package cleanhttp - -import ( - "net/http" - "strings" - "unicode" -) - -// HandlerInput provides input options to cleanhttp's handlers -type HandlerInput struct { - ErrStatus int -} - -// PrintablePathCheckHandler is a middleware that ensures the request path -// contains only printable runes. 
-func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { - // Nil-check on input to make it optional - if input == nil { - input = &HandlerInput{ - ErrStatus: http.StatusBadRequest, - } - } - - // Default to http.StatusBadRequest on error - if input.ErrStatus == 0 { - input.ErrStatus = http.StatusBadRequest - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r != nil { - // Check URL path for non-printable characters - idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { - return !unicode.IsPrint(c) - }) - - if idx != -1 { - w.WriteHeader(input.ErrStatus) - return - } - - if next != nil { - next.ServeHTTP(w, r) - } - } - - return - }) -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore deleted file mode 100644 index 4e309e0..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea/ -*.iml -*.test -.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.go-version b/vendor/github.com/hashicorp/go-retryablehttp/.go-version deleted file mode 100644 index 6fee2fe..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/.go-version +++ /dev/null @@ -1 +0,0 @@ -1.22.2 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md deleted file mode 100644 index 68a627c..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md +++ /dev/null @@ -1,33 +0,0 @@ -## 0.7.7 (May 30, 2024) - -BUG FIXES: - -- client: avoid potentially leaking URL-embedded basic authentication credentials in logs (#158) - -## 0.7.6 (May 9, 2024) - -ENHANCEMENTS: - -- client: support a `RetryPrepare` function for modifying the request before retrying (#216) -- client: support HTTP-date values for `Retry-After` header value (#138) -- client: avoid reading entire body when the body is a 
`*bytes.Reader` (#197) - -BUG FIXES: - -- client: fix a broken check for invalid server certificate in go 1.20+ (#210) - -## 0.7.5 (Nov 8, 2023) - -BUG FIXES: - -- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections (#207) - -## 0.7.4 (Jun 6, 2023) - -BUG FIXES: - -- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 (#194) - -## 0.7.3 (May 15, 2023) - -Initial release diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS deleted file mode 100644 index d6dd78a..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @hashicorp/go-retryablehttp-maintainers diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE deleted file mode 100644 index f4f97ee..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE +++ /dev/null @@ -1,365 +0,0 @@ -Copyright (c) 2015 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. 
- - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile deleted file mode 100644 index 5255241..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -test: - go vet ./... - go test -v -race ./... - -updatedeps: - go get -f -t -u ./... - go get -f -u ./... - -.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md deleted file mode 100644 index 145a62f..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ /dev/null @@ -1,62 +0,0 @@ -go-retryablehttp -================ - -[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] - -[travis]: http://travis-ci.org/hashicorp/go-retryablehttp -[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp - -The `retryablehttp` package provides a familiar HTTP client interface with -automatic retries and exponential backoff. It is a thin wrapper over the -standard `net/http` client library and exposes nearly the same public API. This -makes `retryablehttp` very easy to drop into existing programs. - -`retryablehttp` performs automatic retries under certain conditions. Mainly, if -an error is returned by the client (connection errors, etc.), or if a 500-range -response code is received (except 501), then a retry is invoked after a wait -period. Otherwise, the response is returned and left to the caller to -interpret. - -The main difference from `net/http` is that requests which take a request body -(POST/PUT et. al) can have the body provided in a number of ways (some more or -less efficient) that allow "rewinding" the request body if the initial request -fails so that the full request can be attempted again. 
See the -[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more -details. - -Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required. -From 0.6.7 onward, Go 1.13+ is required. - -Example Use -=========== - -Using this library should look almost identical to what you would do with -`net/http`. The most simple example of a GET request is shown below: - -```go -resp, err := retryablehttp.Get("/foo") -if err != nil { - panic(err) -} -``` - -The returned response object is an `*http.Response`, the same thing you would -usually get from `net/http`. Had the request failed one or more times, the above -call would block and retry with exponential backoff. - -## Getting a stdlib `*http.Client` with retries - -It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. -This makes use of retryablehttp broadly applicable with minimal effort. Simply -configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: - -```go -retryClient := retryablehttp.NewClient() -retryClient.RetryMax = 10 - -standardClient := retryClient.StandardClient() // *http.Client -``` - -For more usage and examples see the -[pkg.go.dev](https://pkg.go.dev/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go deleted file mode 100644 index b2b27e8..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -//go:build !go1.20 -// +build !go1.20 - -package retryablehttp - -import "crypto/x509" - -func isCertError(err error) bool { - _, ok := err.(x509.UnknownAuthorityError) - return ok -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go deleted file mode 100644 index a3cd315..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build go1.20 -// +build go1.20 - -package retryablehttp - -import "crypto/tls" - -func isCertError(err error) bool { - _, ok := err.(*tls.CertificateVerificationError) - return ok -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go deleted file mode 100644 index efee53c..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ /dev/null @@ -1,919 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Package retryablehttp provides a familiar HTTP client interface with -// automatic retries and exponential backoff. It is a thin wrapper over the -// standard net/http client library and exposes nearly the same public API. -// This makes retryablehttp very easy to drop into existing programs. -// -// retryablehttp performs automatic retries under certain conditions. Mainly, if -// an error is returned by the client (connection errors etc), or if a 500-range -// response is received, then a retry is invoked. Otherwise, the response is -// returned and left to the caller to interpret. -// -// Requests which take a request body should provide a non-nil function -// parameter. 
The best choice is to provide either a function satisfying -// ReaderFunc which provides multiple io.Readers in an efficient manner, a -// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte -// slice. As it is a reference type, and we will wrap it as needed by readers, -// we can efficiently re-use the request body without needing to copy it. If an -// io.Reader (such as a *bytes.Reader) is provided, the full body will be read -// prior to the first request, and will be efficiently re-used for any retries. -// ReadSeeker can be used, but some users have observed occasional data races -// between the net/http library and the Seek functionality of some -// implementations of ReadSeeker, so should be avoided if possible. -package retryablehttp - -import ( - "bytes" - "context" - "fmt" - "io" - "log" - "math" - "math/rand" - "net/http" - "net/url" - "os" - "regexp" - "strconv" - "strings" - "sync" - "time" - - cleanhttp "github.com/hashicorp/go-cleanhttp" -) - -var ( - // Default retry configuration - defaultRetryWaitMin = 1 * time.Second - defaultRetryWaitMax = 30 * time.Second - defaultRetryMax = 4 - - // defaultLogger is the logger provided with defaultClient - defaultLogger = log.New(os.Stderr, "", log.LstdFlags) - - // defaultClient is used for performing requests without explicitly making - // a new client. It is purposely private to avoid modifications. - defaultClient = NewClient() - - // We need to consume response bodies to maintain http connections, but - // limit the size we consume to respReadLimit. - respReadLimit = int64(4096) - - // timeNow sets the function that returns the current time. - // This defaults to time.Now. Changes to this should only be done in tests. - timeNow = time.Now - - // A regular expression to match the error returned by net/http when the - // configured number of redirects is exhausted. This error isn't typed - // specifically so we resort to matching on the error string. 
- redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) - - // A regular expression to match the error returned by net/http when the - // scheme specified in the URL is invalid. This error isn't typed - // specifically so we resort to matching on the error string. - schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) - - // A regular expression to match the error returned by net/http when a - // request header or value is invalid. This error isn't typed - // specifically so we resort to matching on the error string. - invalidHeaderErrorRe = regexp.MustCompile(`invalid header`) - - // A regular expression to match the error returned by net/http when the - // TLS certificate is not trusted. This error isn't typed - // specifically so we resort to matching on the error string. - notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) -) - -// ReaderFunc is the type of function that can be given natively to NewRequest -type ReaderFunc func() (io.Reader, error) - -// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it. -// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the -// CheckRetry function indicates that a retry of the base request is not necessary. -// If an error is returned from this function, the CheckRetry policy will be used to determine -// whether to retry the whole request (including this handler). -// -// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code. -// -// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or -// by the caller out-of-band. Failure to do so will result in a memory leak. -type ResponseHandlerFunc func(*http.Response) error - -// LenReader is an interface implemented by many in-memory io.Reader's. Used -// for automatically sending the right Content-Length header when possible. 
-type LenReader interface { - Len() int -} - -// Request wraps the metadata needed to create HTTP requests. -type Request struct { - // body is a seekable reader over the request body payload. This is - // used to rewind the request data in between retries. - body ReaderFunc - - responseHandler ResponseHandlerFunc - - // Embed an HTTP request directly. This makes a *Request act exactly - // like an *http.Request so that all meta methods are supported. - *http.Request -} - -// WithContext returns wrapped Request with a shallow copy of underlying *http.Request -// with its context changed to ctx. The provided ctx must be non-nil. -func (r *Request) WithContext(ctx context.Context) *Request { - return &Request{ - body: r.body, - responseHandler: r.responseHandler, - Request: r.Request.WithContext(ctx), - } -} - -// SetResponseHandler allows setting the response handler. -func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) { - r.responseHandler = fn -} - -// BodyBytes allows accessing the request body. It is an analogue to -// http.Request's Body variable, but it returns a copy of the underlying data -// rather than consuming it. -// -// This function is not thread-safe; do not call it at the same time as another -// call, or at the same time this request is being used with Client.Do. -func (r *Request) BodyBytes() ([]byte, error) { - if r.body == nil { - return nil, nil - } - body, err := r.body() - if err != nil { - return nil, err - } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(body) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// SetBody allows setting the request body. -// -// It is useful if a new body needs to be set without constructing a new Request. 
-func (r *Request) SetBody(rawBody interface{}) error { - bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) - if err != nil { - return err - } - r.body = bodyReader - r.ContentLength = contentLength - if bodyReader != nil { - r.GetBody = func() (io.ReadCloser, error) { - body, err := bodyReader() - if err != nil { - return nil, err - } - if rc, ok := body.(io.ReadCloser); ok { - return rc, nil - } - return io.NopCloser(body), nil - } - } else { - r.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } - } - return nil -} - -// WriteTo allows copying the request body into a writer. -// -// It writes data to w until there's no more data to write or -// when an error occurs. The return int64 value is the number of bytes -// written. Any error encountered during the write is also returned. -// The signature matches io.WriterTo interface. -func (r *Request) WriteTo(w io.Writer) (int64, error) { - body, err := r.body() - if err != nil { - return 0, err - } - if c, ok := body.(io.Closer); ok { - defer c.Close() - } - return io.Copy(w, body) -} - -func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { - var bodyReader ReaderFunc - var contentLength int64 - - switch body := rawBody.(type) { - // If they gave us a function already, great! Use it. 
- case ReaderFunc: - bodyReader = body - tmp, err := body() - if err != nil { - return nil, 0, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - case func() (io.Reader, error): - bodyReader = body - tmp, err := body() - if err != nil { - return nil, 0, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - // If a regular byte slice, we can read it over and over via new - // readers - case []byte: - buf := body - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // If a bytes.Buffer we can read the underlying byte slice over and - // over - case *bytes.Buffer: - buf := body - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf.Bytes()), nil - } - contentLength = int64(buf.Len()) - - // We prioritize *bytes.Reader here because we don't really want to - // deal with it seeking so want it to match here instead of the - // io.ReadSeeker case. 
- case *bytes.Reader: - snapshot := *body - bodyReader = func() (io.Reader, error) { - r := snapshot - return &r, nil - } - contentLength = int64(body.Len()) - - // Compat case - case io.ReadSeeker: - raw := body - bodyReader = func() (io.Reader, error) { - _, err := raw.Seek(0, 0) - return io.NopCloser(raw), err - } - if lr, ok := raw.(LenReader); ok { - contentLength = int64(lr.Len()) - } - - // Read all in so we can reset - case io.Reader: - buf, err := io.ReadAll(body) - if err != nil { - return nil, 0, err - } - if len(buf) == 0 { - bodyReader = func() (io.Reader, error) { - return http.NoBody, nil - } - contentLength = 0 - } else { - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - } - - // No body provided, nothing to do - case nil: - - // Unrecognized type - default: - return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) - } - return bodyReader, contentLength, nil -} - -// FromRequest wraps an http.Request in a retryablehttp.Request -func FromRequest(r *http.Request) (*Request, error) { - bodyReader, _, err := getBodyReaderAndContentLength(r.Body) - if err != nil { - return nil, err - } - // Could assert contentLength == r.ContentLength - return &Request{body: bodyReader, Request: r}, nil -} - -// NewRequest creates a new wrapped request. -func NewRequest(method, url string, rawBody interface{}) (*Request, error) { - return NewRequestWithContext(context.Background(), method, url, rawBody) -} - -// NewRequestWithContext creates a new wrapped request with the provided context. -// -// The context controls the entire lifetime of a request and its response: -// obtaining a connection, sending the request, and reading the response headers and body. 
-func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { - httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) - if err != nil { - return nil, err - } - - req := &Request{ - Request: httpReq, - } - if err := req.SetBody(rawBody); err != nil { - return nil, err - } - - return req, nil -} - -// Logger interface allows to use other loggers than -// standard log.Logger. -type Logger interface { - Printf(string, ...interface{}) -} - -// LeveledLogger is an interface that can be implemented by any logger or a -// logger wrapper to provide leveled logging. The methods accept a message -// string and a variadic number of key-value pairs. For log.Printf style -// formatting where message string contains a format specifier, use Logger -// interface. -type LeveledLogger interface { - Error(msg string, keysAndValues ...interface{}) - Info(msg string, keysAndValues ...interface{}) - Debug(msg string, keysAndValues ...interface{}) - Warn(msg string, keysAndValues ...interface{}) -} - -// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions -// without changing the API. -type hookLogger struct { - LeveledLogger -} - -func (h hookLogger) Printf(s string, args ...interface{}) { - h.Info(fmt.Sprintf(s, args...)) -} - -// RequestLogHook allows a function to run before each retry. The HTTP -// request which will be made, and the retry number (0 for the initial -// request) are available to users. The internal logger is exposed to -// consumers. -type RequestLogHook func(Logger, *http.Request, int) - -// ResponseLogHook is like RequestLogHook, but allows running a function -// on each HTTP response. This function will be invoked at the end of -// every HTTP request executed, regardless of whether a subsequent retry -// needs to be performed or not. If the response body is read or closed -// from this method, this will affect the response returned from Do(). 
-type ResponseLogHook func(Logger, *http.Response) - -// CheckRetry specifies a policy for handling retries. It is called -// following each request with the response and error values returned by -// the http.Client. If CheckRetry returns false, the Client stops retrying -// and returns the response to the caller. If CheckRetry returns an error, -// that error value is returned in lieu of the error from the request. The -// Client will close any response body when retrying, but if the retry is -// aborted it is up to the CheckRetry callback to properly close any -// response body before returning. -type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) - -// Backoff specifies a policy for how long to wait between retries. -// It is called after a failing request to determine the amount of time -// that should pass before trying again. -type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration - -// ErrorHandler is called if retries are expired, containing the last status -// from the http library. If not specified, default behavior for the library is -// to close the body and return an error indicating how many tries were -// attempted. If overriding this, be sure to close the body if needed. -type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) - -// PrepareRetry is called before retry operation. It can be used for example to re-sign the request -type PrepareRetry func(req *http.Request) error - -// Client is used to make HTTP requests. It adds additional functionality -// like automatic retries to tolerate minor outages. -type Client struct { - HTTPClient *http.Client // Internal HTTP client. - Logger interface{} // Customer logger instance. 
Can be either Logger or LeveledLogger - - RetryWaitMin time.Duration // Minimum time to wait - RetryWaitMax time.Duration // Maximum time to wait - RetryMax int // Maximum number of retries - - // RequestLogHook allows a user-supplied function to be called - // before each retry. - RequestLogHook RequestLogHook - - // ResponseLogHook allows a user-supplied function to be called - // with the response from each HTTP request executed. - ResponseLogHook ResponseLogHook - - // CheckRetry specifies the policy for handling retries, and is called - // after each request. The default policy is DefaultRetryPolicy. - CheckRetry CheckRetry - - // Backoff specifies the policy for how long to wait between retries - Backoff Backoff - - // ErrorHandler specifies the custom error handler to use, if any - ErrorHandler ErrorHandler - - // PrepareRetry can prepare the request for retry operation, for example re-sign it - PrepareRetry PrepareRetry - - loggerInit sync.Once - clientInit sync.Once -} - -// NewClient creates a new Client with default settings. -func NewClient() *Client { - return &Client{ - HTTPClient: cleanhttp.DefaultPooledClient(), - Logger: defaultLogger, - RetryWaitMin: defaultRetryWaitMin, - RetryWaitMax: defaultRetryWaitMax, - RetryMax: defaultRetryMax, - CheckRetry: DefaultRetryPolicy, - Backoff: DefaultBackoff, - } -} - -func (c *Client) logger() interface{} { - c.loggerInit.Do(func() { - if c.Logger == nil { - return - } - - switch c.Logger.(type) { - case Logger, LeveledLogger: - // ok - default: - // This should happen in dev when they are setting Logger and work on code, not in prod. - panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger)) - } - }) - - return c.Logger -} - -// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which -// will retry on connection errors and server errors. 
-func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { - // do not retry on context.Canceled or context.DeadlineExceeded - if ctx.Err() != nil { - return false, ctx.Err() - } - - // don't propagate other errors - shouldRetry, _ := baseRetryPolicy(resp, err) - return shouldRetry, nil -} - -// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it -// propagates errors back instead of returning nil. This allows you to inspect -// why it decided to retry or not. -func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { - // do not retry on context.Canceled or context.DeadlineExceeded - if ctx.Err() != nil { - return false, ctx.Err() - } - - return baseRetryPolicy(resp, err) -} - -func baseRetryPolicy(resp *http.Response, err error) (bool, error) { - if err != nil { - if v, ok := err.(*url.Error); ok { - // Don't retry if the error was due to too many redirects. - if redirectsErrorRe.MatchString(v.Error()) { - return false, v - } - - // Don't retry if the error was due to an invalid protocol scheme. - if schemeErrorRe.MatchString(v.Error()) { - return false, v - } - - // Don't retry if the error was due to an invalid header. - if invalidHeaderErrorRe.MatchString(v.Error()) { - return false, v - } - - // Don't retry if the error was due to TLS cert verification failure. - if notTrustedErrorRe.MatchString(v.Error()) { - return false, v - } - if isCertError(v.Err) { - return false, v - } - } - - // The error is likely recoverable so retry. - return true, nil - } - - // 429 Too Many Requests is recoverable. Sometimes the server puts - // a Retry-After response header to indicate when the server is - // available to start processing request from client. - if resp.StatusCode == http.StatusTooManyRequests { - return true, nil - } - - // Check the response code. 
We retry on 500-range responses to allow - // the server time to recover, as 500's are typically not permanent - // errors and may relate to outages on the server side. This will catch - // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { - return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) - } - - return false, nil -} - -// DefaultBackoff provides a default callback for Client.Backoff which -// will perform exponential backoff based on the attempt number and limited -// by the provided minimum and maximum durations. -// -// It also tries to parse Retry-After response header when a http.StatusTooManyRequests -// (HTTP Code 429) is found in the resp parameter. Hence it will return the number of -// seconds the server states it may be ready to process more requests from this client. -func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - if resp != nil { - if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { - if sleep, ok := parseRetryAfterHeader(resp.Header["Retry-After"]); ok { - return sleep - } - } - } - - mult := math.Pow(2, float64(attemptNum)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} - -// parseRetryAfterHeader parses the Retry-After header and returns the -// delay duration according to the spec: https://httpwg.org/specs/rfc7231.html#header.retry-after -// The bool returned will be true if the header was successfully parsed. -// Otherwise, the header was either not present, or was not parseable according to the spec. 
-// -// Retry-After headers come in two flavors: Seconds or HTTP-Date -// -// Examples: -// * Retry-After: Fri, 31 Dec 1999 23:59:59 GMT -// * Retry-After: 120 -func parseRetryAfterHeader(headers []string) (time.Duration, bool) { - if len(headers) == 0 || headers[0] == "" { - return 0, false - } - header := headers[0] - // Retry-After: 120 - if sleep, err := strconv.ParseInt(header, 10, 64); err == nil { - if sleep < 0 { // a negative sleep doesn't make sense - return 0, false - } - return time.Second * time.Duration(sleep), true - } - - // Retry-After: Fri, 31 Dec 1999 23:59:59 GMT - retryTime, err := time.Parse(time.RFC1123, header) - if err != nil { - return 0, false - } - if until := retryTime.Sub(timeNow()); until > 0 { - return until, true - } - // date is in the past - return 0, true -} - -// LinearJitterBackoff provides a callback for Client.Backoff which will -// perform linear backoff based on the attempt number and with jitter to -// prevent a thundering herd. -// -// min and max here are *not* absolute values. The number to be multiplied by -// the attempt number will be chosen at random from between them, thus they are -// bounding the jitter. -// -// For instance: -// * To get strictly linear backoff of one second increasing each retry, set -// both to one second (1s, 2s, 3s, 4s, ...) -// * To get a small amount of jitter centered around one second increasing each -// retry, set to around one second, such as a min of 800ms and max of 1200ms -// (892ms, 2102ms, 2945ms, 4312ms, ...) -// * To get extreme jitter, set to a very wide spread, such as a min of 100ms -// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) 
-func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - // attemptNum always starts at zero but we want to start at 1 for multiplication - attemptNum++ - - if max <= min { - // Unclear what to do here, or they are the same, so return min * - // attemptNum - return min * time.Duration(attemptNum) - } - - // Seed rand; doing this every time is fine - source := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - - // Pick a random number that lies somewhere between the min and max and - // multiply by the attemptNum. attemptNum starts at zero so we always - // increment here. We first get a random percentage, then apply that to the - // difference between min and max, and add to min. - jitter := source.Float64() * float64(max-min) - jitterMin := int64(jitter) + int64(min) - return time.Duration(jitterMin * int64(attemptNum)) -} - -// PassthroughErrorHandler is an ErrorHandler that directly passes through the -// values from the net/http library for the final request. The body is not -// closed. -func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { - return resp, err -} - -// Do wraps calling an HTTP method with retries. -func (c *Client) Do(req *Request) (*http.Response, error) { - c.clientInit.Do(func() { - if c.HTTPClient == nil { - c.HTTPClient = cleanhttp.DefaultPooledClient() - } - }) - - logger := c.logger() - - if logger != nil { - switch v := logger.(type) { - case LeveledLogger: - v.Debug("performing request", "method", req.Method, "url", redactURL(req.URL)) - case Logger: - v.Printf("[DEBUG] %s %s", req.Method, redactURL(req.URL)) - } - } - - var resp *http.Response - var attempt int - var shouldRetry bool - var doErr, respErr, checkErr, prepareErr error - - for i := 0; ; i++ { - doErr, respErr, prepareErr = nil, nil, nil - attempt++ - - // Always rewind the request body when non-nil. 
- if req.body != nil { - body, err := req.body() - if err != nil { - c.HTTPClient.CloseIdleConnections() - return resp, err - } - if c, ok := body.(io.ReadCloser); ok { - req.Body = c - } else { - req.Body = io.NopCloser(body) - } - } - - if c.RequestLogHook != nil { - switch v := logger.(type) { - case LeveledLogger: - c.RequestLogHook(hookLogger{v}, req.Request, i) - case Logger: - c.RequestLogHook(v, req.Request, i) - default: - c.RequestLogHook(nil, req.Request, i) - } - } - - // Attempt the request - resp, doErr = c.HTTPClient.Do(req.Request) - - // Check if we should continue with retries. - shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) - if !shouldRetry && doErr == nil && req.responseHandler != nil { - respErr = req.responseHandler(resp) - shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) - } - - err := doErr - if respErr != nil { - err = respErr - } - if err != nil { - switch v := logger.(type) { - case LeveledLogger: - v.Error("request failed", "error", err, "method", req.Method, "url", redactURL(req.URL)) - case Logger: - v.Printf("[ERR] %s %s request failed: %v", req.Method, redactURL(req.URL), err) - } - } else { - // Call this here to maintain the behavior of logging all requests, - // even if CheckRetry signals to stop. - if c.ResponseLogHook != nil { - // Call the response logger function if provided. - switch v := logger.(type) { - case LeveledLogger: - c.ResponseLogHook(hookLogger{v}, resp) - case Logger: - c.ResponseLogHook(v, resp) - default: - c.ResponseLogHook(nil, resp) - } - } - } - - if !shouldRetry { - break - } - - // We do this before drainBody because there's no need for the I/O if - // we're breaking out - remain := c.RetryMax - i - if remain <= 0 { - break - } - - // We're going to retry, consume any response to reuse the connection. 
- if doErr == nil { - c.drainBody(resp.Body) - } - - wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) - if logger != nil { - desc := fmt.Sprintf("%s %s", req.Method, redactURL(req.URL)) - if resp != nil { - desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) - } - switch v := logger.(type) { - case LeveledLogger: - v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) - case Logger: - v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) - } - } - timer := time.NewTimer(wait) - select { - case <-req.Context().Done(): - timer.Stop() - c.HTTPClient.CloseIdleConnections() - return nil, req.Context().Err() - case <-timer.C: - } - - // Make shallow copy of http Request so that we can modify its body - // without racing against the closeBody call in persistConn.writeLoop. - httpreq := *req.Request - req.Request = &httpreq - - if c.PrepareRetry != nil { - if err := c.PrepareRetry(req.Request); err != nil { - prepareErr = err - break - } - } - } - - // this is the closest we have to success criteria - if doErr == nil && respErr == nil && checkErr == nil && prepareErr == nil && !shouldRetry { - return resp, nil - } - - defer c.HTTPClient.CloseIdleConnections() - - var err error - if prepareErr != nil { - err = prepareErr - } else if checkErr != nil { - err = checkErr - } else if respErr != nil { - err = respErr - } else { - err = doErr - } - - if c.ErrorHandler != nil { - return c.ErrorHandler(resp, err, attempt) - } - - // By default, we close the response body and return an error without - // returning the response - if resp != nil { - c.drainBody(resp.Body) - } - - // this means CheckRetry thought the request was a failure, but didn't - // communicate why - if err == nil { - return nil, fmt.Errorf("%s %s giving up after %d attempt(s)", - req.Method, redactURL(req.URL), attempt) - } - - return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w", - req.Method, redactURL(req.URL), attempt, err) -} - -// 
Try to read the response body so we can reuse this connection. -func (c *Client) drainBody(body io.ReadCloser) { - defer body.Close() - _, err := io.Copy(io.Discard, io.LimitReader(body, respReadLimit)) - if err != nil { - if c.logger() != nil { - switch v := c.logger().(type) { - case LeveledLogger: - v.Error("error reading response body", "error", err) - case Logger: - v.Printf("[ERR] error reading response body: %v", err) - } - } - } -} - -// Get is a shortcut for doing a GET request without making a new client. -func Get(url string) (*http.Response, error) { - return defaultClient.Get(url) -} - -// Get is a convenience helper for doing simple GET requests. -func (c *Client) Get(url string) (*http.Response, error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Head is a shortcut for doing a HEAD request without making a new client. -func Head(url string) (*http.Response, error) { - return defaultClient.Head(url) -} - -// Head is a convenience method for doing simple HEAD requests. -func (c *Client) Head(url string) (*http.Response, error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body interface{}) (*http.Response, error) { - return defaultClient.Post(url, bodyType, body) -} - -// Post is a convenience method for doing simple POST requests. -func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return c.Do(req) -} - -// PostForm is a shortcut to perform a POST with form data without creating -// a new client. 
-func PostForm(url string, data url.Values) (*http.Response, error) { - return defaultClient.PostForm(url, data) -} - -// PostForm is a convenience method for doing simple POST operations using -// pre-filled url.Values form data. -func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// StandardClient returns a stdlib *http.Client with a custom Transport, which -// shims in a *retryablehttp.Client for added retries. -func (c *Client) StandardClient() *http.Client { - return &http.Client{ - Transport: &RoundTripper{Client: c}, - } -} - -// Taken from url.URL#Redacted() which was introduced in go 1.15. -// We can switch to using it directly if we'll bump the minimum required go version. -func redactURL(u *url.URL) string { - if u == nil { - return "" - } - - ru := *u - if _, has := ru.User.Password(); has { - ru.User = url.UserPassword(ru.User.Username(), "xxxxx") - } - return ru.String() -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go deleted file mode 100644 index 8c407ad..0000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package retryablehttp - -import ( - "errors" - "net/http" - "net/url" - "sync" -) - -// RoundTripper implements the http.RoundTripper interface, using a retrying -// HTTP client to execute requests. -// -// It is important to note that retryablehttp doesn't always act exactly as a -// RoundTripper should. This is highly dependent on the retryable client's -// configuration. -type RoundTripper struct { - // The client to use during requests. If nil, the default retryablehttp - // client and settings will be used. 
- Client *Client - - // once ensures that the logic to initialize the default client runs at - // most once, in a single thread. - once sync.Once -} - -// init initializes the underlying retryable client. -func (rt *RoundTripper) init() { - if rt.Client == nil { - rt.Client = NewClient() - } -} - -// RoundTrip satisfies the http.RoundTripper interface. -func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - rt.once.Do(rt.init) - - // Convert the request to be retryable. - retryableReq, err := FromRequest(req) - if err != nil { - return nil, err - } - - // Execute the request. - resp, err := rt.Client.Do(retryableReq) - // If we got an error returned by standard library's `Do` method, unwrap it - // otherwise we will wind up erroneously re-nesting the error. - if _, ok := err.(*url.Error); ok { - return resp, errors.Unwrap(err) - } - - return resp, err -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de0..0000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All 
rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cd..0000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb..0000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error 
handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. 
- -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2..0000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. 
For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. 
This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. 
-func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. 
-func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d..0000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. 
-func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. -// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a834..0000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. 
-func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. 
-// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). 
-func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml deleted file mode 100644 index fa139db..0000000 --- a/vendor/golang.org/x/oauth2/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - tip - -install: - - export GOPATH="$HOME/gopath" - - mkdir -p "$GOPATH/src/golang.org/x" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d golang.org/x/oauth2/... - -script: - - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index dfbed62..0000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. 
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 781770c..0000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# OAuth2 for Go - -[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - -See pkg.go.dev for further documentation and examples. - -* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) -* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) - -## Policy for new endpoints - -We no longer accept new provider-specific packages in this repo if all -they do is add a single endpoint variable. If you just want to add a -single endpoint, add it to the -[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) -package. - -## Report Issues / Send Patches - -The main issue tracker for the oauth2 repository is located at -https://github.com/golang/oauth2/issues. - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: - -* Excluding trivial changes, all contributions should be connected to an existing issue. -* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. -* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). 
diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go deleted file mode 100644 index e99c92f..0000000 --- a/vendor/golang.org/x/oauth2/deviceauth.go +++ /dev/null @@ -1,198 +0,0 @@ -package oauth2 - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/oauth2/internal" -) - -// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 -const ( - errAuthorizationPending = "authorization_pending" - errSlowDown = "slow_down" - errAccessDenied = "access_denied" - errExpiredToken = "expired_token" -) - -// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response -// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 -type DeviceAuthResponse struct { - // DeviceCode - DeviceCode string `json:"device_code"` - // UserCode is the code the user should enter at the verification uri - UserCode string `json:"user_code"` - // VerificationURI is where user should enter the user code - VerificationURI string `json:"verification_uri"` - // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. 
- VerificationURIComplete string `json:"verification_uri_complete,omitempty"` - // Expiry is when the device code and user code expire - Expiry time.Time `json:"expires_in,omitempty"` - // Interval is the duration in seconds that Poll should wait between requests - Interval int64 `json:"interval,omitempty"` -} - -func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { - type Alias DeviceAuthResponse - var expiresIn int64 - if !d.Expiry.IsZero() { - expiresIn = int64(time.Until(d.Expiry).Seconds()) - } - return json.Marshal(&struct { - ExpiresIn int64 `json:"expires_in,omitempty"` - *Alias - }{ - ExpiresIn: expiresIn, - Alias: (*Alias)(&d), - }) - -} - -func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { - type Alias DeviceAuthResponse - aux := &struct { - ExpiresIn int64 `json:"expires_in"` - // workaround misspelling of verification_uri - VerificationURL string `json:"verification_url"` - *Alias - }{ - Alias: (*Alias)(c), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - if aux.ExpiresIn != 0 { - c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) - } - if c.VerificationURI == "" { - c.VerificationURI = aux.VerificationURL - } - return nil -} - -// DeviceAuth returns a device auth struct which contains a device code -// and authorization information provided for users to enter on another device. 
-func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { - // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 - v := url.Values{ - "client_id": {c.ClientID}, - } - if len(c.Scopes) > 0 { - v.Set("scope", strings.Join(c.Scopes, " ")) - } - for _, opt := range opts { - opt.setValue(v) - } - return retrieveDeviceAuth(ctx, c, v) -} - -func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { - if c.Endpoint.DeviceAuthURL == "" { - return nil, errors.New("endpoint missing DeviceAuthURL") - } - - req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("Accept", "application/json") - - t := time.Now() - r, err := internal.ContextClient(ctx).Do(req) - if err != nil { - return nil, err - } - - body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } - } - - da := &DeviceAuthResponse{} - err = json.Unmarshal(body, &da) - if err != nil { - return nil, fmt.Errorf("unmarshal %s", err) - } - - if !da.Expiry.IsZero() { - // Make a small adjustment to account for time taken by the request - da.Expiry = da.Expiry.Add(-time.Since(t)) - } - - return da, nil -} - -// DeviceAccessToken polls the server to exchange a device code for a token. 
-func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { - if !da.Expiry.IsZero() { - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, da.Expiry) - defer cancel() - } - - // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 - v := url.Values{ - "client_id": {c.ClientID}, - "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, - "device_code": {da.DeviceCode}, - } - if len(c.Scopes) > 0 { - v.Set("scope", strings.Join(c.Scopes, " ")) - } - for _, opt := range opts { - opt.setValue(v) - } - - // "If no value is provided, clients MUST use 5 as the default." - // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 - interval := da.Interval - if interval == 0 { - interval = 5 - } - - ticker := time.NewTicker(time.Duration(interval) * time.Second) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-ticker.C: - tok, err := retrieveToken(ctx, c, v) - if err == nil { - return tok, nil - } - - e, ok := err.(*RetrieveError) - if !ok { - return nil, err - } - switch e.ErrorCode { - case errSlowDown: - // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 - // "the interval MUST be increased by 5 seconds for this and all subsequent requests" - interval += 5 - ticker.Reset(time.Duration(interval) * time.Second) - case errAuthorizationPending: - // Do nothing. - case errAccessDenied, errExpiredToken: - fallthrough - default: - return tok, err - } - } - } -} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go deleted file mode 100644 index 03265e8..0000000 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. 
-package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index 14989be..0000000 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index e83ddee..0000000 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// Token represents the credentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token or error in JSON form. 
-// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - // error fields - // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 - ErrorCode string `json:"error"` - ErrorDescription string `json:"error_description"` - ErrorURI string `json:"error_uri"` -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - if len(b) == 0 || string(b) == "null" { - return nil - } - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - if i > math.MaxInt32 { - i = math.MaxInt32 - } - *e = expirationTime(i) - return nil -} - -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - -// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. -type AuthStyle int - -const ( - AuthStyleUnknown AuthStyle = 0 - AuthStyleInParams AuthStyle = 1 - AuthStyleInHeader AuthStyle = 2 -) - -// LazyAuthStyleCache is a backwards compatibility compromise to let Configs -// have a lazily-initialized AuthStyleCache. 
-// -// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, -// both would ideally just embed an unexported AuthStyleCache but because both -// were historically allowed to be copied by value we can't retroactively add an -// uncopyable Mutex to them. -// -// We could use an atomic.Pointer, but that was added recently enough (in Go -// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 -// still pass. By using an atomic.Value, it supports both Go 1.17 and -// copying by value, even if that's not ideal. -type LazyAuthStyleCache struct { - v atomic.Value // of *AuthStyleCache -} - -func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { - if c, ok := lc.v.Load().(*AuthStyleCache); ok { - return c - } - c := new(AuthStyleCache) - if !lc.v.CompareAndSwap(nil, c) { - c = lc.v.Load().(*AuthStyleCache) - } - return c -} - -// AuthStyleCache is the set of tokenURLs we've successfully used via -// RetrieveToken and which style auth we ended up using. -// It's called a cache, but it doesn't (yet?) shrink. It's expected that -// the set of OAuth2 servers a program contacts over time is fixed and -// small. -type AuthStyleCache struct { - mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// lookupAuthStyle reports which auth style we last used with tokenURL -// when calling RetrieveToken and whether we have ever done so. -func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - style, ok = c.m[tokenURL] - return -} - -// setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { - c.mu.Lock() - defer c.mu.Unlock() - if c.m == nil { - c.m = make(map[string]AuthStyle) - } - c.m[tokenURL] = v -} - -// newTokenRequest returns a new *http.Request to retrieve a new token -// from tokenURL using the provided clientID, clientSecret, and POST -// body parameters. 
-// -// inParams is whether the clientID & clientSecret should be encoded -// as the POST body. An 'inParams' value of true means to send it in -// the POST body (along with any values in v); false means to send it -// in the Authorization header. -func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { - if authStyle == AuthStyleInParams { - v = cloneURLValues(v) - if clientID != "" { - v.Set("client_id", clientID) - } - if clientSecret != "" { - v.Set("client_secret", clientSecret) - } - } - req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if authStyle == AuthStyleInHeader { - req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) - } - return req, nil -} - -func cloneURLValues(v url.Values) url.Values { - v2 := make(url.Values, len(v)) - for k, vv := range v { - v2[k] = append([]string(nil), vv...) - } - return v2 -} - -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 - if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { - authStyle = style - needsAuthStyleProbe = false - } else { - authStyle = AuthStyleInHeader // the first way we'll try - } - } - req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) - if err != nil { - return nil, err - } - token, err := doTokenRoundTrip(ctx, req) - if err != nil && needsAuthStyleProbe { - // If we get an error, assume the server wants the - // clientID & clientSecret in a different form. - // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
- // In summary: - // - Reddit only accepts client secret in the Authorization header - // - Dropbox accepts either it in URL param or Auth header, but not both. - // - Google only accepts URL param (not spec compliant?), not Auth header - // - Stripe only accepts client secret in Auth header with Bearer method, not Basic - // - // We used to maintain a big table in this code of all the sites and which way - // they went, but maintaining it didn't scale & got annoying. - // So just try both ways. - authStyle = AuthStyleInParams // the second way we'll try - req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) - token, err = doTokenRoundTrip(ctx, req) - } - if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token != nil && token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, err -} - -func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ContextClient(ctx).Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - r.Body.Close() - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - - failureStatus := r.StatusCode < 200 || r.StatusCode > 299 - retrieveError := &RetrieveError{ - Response: r, - Body: body, - // attempt to populate error detail below - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - // some endpoints return a query string - vals, err := url.ParseQuery(string(body)) - if err != nil { - if failureStatus { - return nil, retrieveError - } - return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) - } - retrieveError.ErrorCode = vals.Get("error") - retrieveError.ErrorDescription = 
vals.Get("error_description") - retrieveError.ErrorURI = vals.Get("error_uri") - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - if failureStatus { - return nil, retrieveError - } - return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) - } - retrieveError.ErrorCode = tj.ErrorCode - retrieveError.ErrorDescription = tj.ErrorDescription - retrieveError.ErrorURI = tj.ErrorURI - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - // according to spec, servers should respond status 400 in error case - // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 - // but some unorthodox servers respond 200 in error case - if failureStatus || retrieveError.ErrorCode != "" { - return nil, retrieveError - } - if token.AccessToken == "" { - return nil, errors.New("oauth2: server response missing access_token") - } - return token, nil -} - -// mirrors oauth2.RetrieveError -type RetrieveError struct { - Response *http.Response - Body []byte - ErrorCode string - ErrorDescription string - ErrorURI string -} - -func (r *RetrieveError) Error() string { - if r.ErrorCode != "" { - s := fmt.Sprintf("oauth2: %q", r.ErrorCode) - if r.ErrorDescription != "" { - s += fmt.Sprintf(" %q", r.ErrorDescription) - } - if r.ErrorURI != "" { - s += fmt.Sprintf(" %q", r.ErrorURI) - } - return s - } - return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go 
b/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index b9db01d..0000000 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "net/http" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -func ContextClient(ctx context.Context) *http.Client { - if ctx != nil { - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc - } - } - return http.DefaultClient -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index 09f6a49..0000000 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests, -// as specified in RFC 6749. -// It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" - -import ( - "bytes" - "context" - "errors" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -// -// Deprecated: Use context.Background() or context.TODO() instead. 
-var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). -type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string - - // authStyleCache caches which auth style to use when Endpoint.AuthStyle is - // the zero value (AuthStyleAutoDetect). - authStyleCache internal.LazyAuthStyleCache -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint represents an OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - DeviceAuthURL string - TokenURL string - - // AuthStyle optionally specifies how the endpoint wants the - // client ID & client secret sent. 
The zero value means to - // auto-detect. - AuthStyle AuthStyle -} - -// AuthStyle represents how requests for tokens are authenticated -// to the server. -type AuthStyle int - -const ( - // AuthStyleAutoDetect means to auto-detect which authentication - // style the provider wants by trying both ways and caching - // the successful way for the future. - AuthStyleAutoDetect AuthStyle = 0 - - // AuthStyleInParams sends the "client_id" and "client_secret" - // in the POST body as application/x-www-form-urlencoded parameters. - AuthStyleInParams AuthStyle = 1 - - // AuthStyleInHeader sends the client_id and client_password - // using HTTP Basic Authorization. This is an optional style - // described in the OAuth2 RFC 6749 section 2.3.1. - AuthStyleInHeader AuthStyle = 2 -) - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. 
-type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is an opaque value used by the client to maintain state between the -// request and callback. The authorization server includes this value when -// redirecting the user agent back to the client. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -// -// To protect against CSRF attacks, opts should include a PKCE challenge -// (S256ChallengeOption). Not all servers support PKCE. An alternative is to -// generate a random state parameter and verify it after exchange. 
-// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating -// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and -// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - } - if c.RedirectURL != "" { - v.Set("redirect_uri", c.RedirectURL) - } - if len(c.Scopes) > 0 { - v.Set("scope", strings.Join(c.Scopes, " ")) - } - if state != "" { - v.Set("state", state) - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. -func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - v := url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - } - if len(c.Scopes) > 0 { - v.Set("scope", strings.Join(c.Scopes, " ")) - } - return retrieveToken(ctx, c, v) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). 
-// -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are -// using it to protect against CSRF attacks. -// -// If using PKCE to protect against CSRF attacks, opts should include a -// VerifierOption. -func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { - v := url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - } - if c.RedirectURL != "" { - v.Set("redirect_uri", c.RedirectURL) - } - for _, opt := range opts { - opt.setValue(v) - } - return retrieveToken(ctx, c, v) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. -type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. 
-// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. -func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token - - expiryDelta time.Duration -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - t.expiryDelta = s.expiryDelta - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. 
-type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// Note that if a custom *http.Client is provided via the Context it -// is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - return internal.ContextClient(ctx) - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. 
- if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} - -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is -// configurable. The expiration time of a token is calculated as -// t.Expiry.Add(-earlyExpiry). -func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly, but set the expiryDelta to earlyExpiry, - // so the behavior matches what the user expects. - rt.expiryDelta = earlyExpiry - return rt - } - src = rt.new - } - if t != nil { - t.expiryDelta = earlyExpiry - } - return &reuseTokenSource{ - t: t, - new: src, - expiryDelta: earlyExpiry, - } -} diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go deleted file mode 100644 index 50593b6..0000000 --- a/vendor/golang.org/x/oauth2/pkce.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package oauth2 - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/base64" - "net/url" -) - -const ( - codeChallengeKey = "code_challenge" - codeChallengeMethodKey = "code_challenge_method" - codeVerifierKey = "code_verifier" -) - -// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. -// This follows recommendations in RFC 7636. -// -// A fresh verifier should be generated for each authorization. 
-// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). -func GenerateVerifier() string { - // "RECOMMENDED that the output of a suitable random number generator be - // used to create a 32-octet sequence. The octet sequence is then - // base64url-encoded to produce a 43-octet URL-safe string to use as the - // code verifier." - // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 - data := make([]byte, 32) - if _, err := rand.Read(data); err != nil { - panic(err) - } - return base64.RawURLEncoding.EncodeToString(data) -} - -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. -func VerifierOption(verifier string) AuthCodeOption { - return setParam{k: codeVerifierKey, v: verifier} -} - -// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. -// -// Prefer to use S256ChallengeOption where possible. -func S256ChallengeFromVerifier(verifier string) string { - sha := sha256.Sum256([]byte(verifier)) - return base64.RawURLEncoding.EncodeToString(sha[:]) -} - -// S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess -// only. 
-func S256ChallengeOption(verifier string) AuthCodeOption { - return challengeOption{ - challenge_method: "S256", - challenge: S256ChallengeFromVerifier(verifier), - } -} - -type challengeOption struct{ challenge_method, challenge string } - -func (p challengeOption) setValue(m url.Values) { - m.Set(codeChallengeMethodKey, p.challenge_method) - m.Set(codeChallengeKey, p.challenge) -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 5bbb332..0000000 --- a/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/oauth2/internal" -) - -// defaultExpiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const defaultExpiryDelta = 10 * time.Second - -// Token represents the credentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. 
- RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} - - // expiryDelta is used to calculate when a token is considered - // expired, by subtracting from Expiry. If zero, defaultExpiryDelta - // is used. - expiryDelta time.Duration -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. -func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. 
-func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// timeNow is time.Now but pulled out as a variable for tests. -var timeNow = time.Now - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - - expiryDelta := defaultExpiryDelta - if t.expiryDelta != 0 { - expiryDelta = t.expiryDelta - } - return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. -// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. 
-func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) - if err != nil { - if rErr, ok := err.(*internal.RetrieveError); ok { - return nil, (*RetrieveError)(rErr) - } - return nil, err - } - return tokenFromInternal(tk), nil -} - -// RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. -// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 -type RetrieveError struct { - Response *http.Response - // Body is the body that was consumed by reading Response.Body. - // It may be truncated. - Body []byte - // ErrorCode is RFC 6749's 'error' parameter. - ErrorCode string - // ErrorDescription is RFC 6749's 'error_description' parameter. - ErrorDescription string - // ErrorURI is RFC 6749's 'error_uri' parameter. - ErrorURI string -} - -func (r *RetrieveError) Error() string { - if r.ErrorCode != "" { - s := fmt.Sprintf("oauth2: %q", r.ErrorCode) - if r.ErrorDescription != "" { - s += fmt.Sprintf(" %q", r.ErrorDescription) - } - if r.ErrorURI != "" { - s += fmt.Sprintf(" %q", r.ErrorURI) - } - return s - } - return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) -} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index 9065791..0000000 --- a/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package oauth2 - -import ( - "errors" - "log" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper -} - -// RoundTrip authorizes and authenticates the request with an -// access token from Transport's Source. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - reqBodyClosed := false - if req.Body != nil { - defer func() { - if !reqBodyClosed { - req.Body.Close() - } - }() - } - - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - - // req.Body is assumed to be closed by the base RoundTripper. - reqBodyClosed = true - return t.base().RoundTrip(req2) -} - -var cancelOnce sync.Once - -// CancelRequest does nothing. It used to be a legacy cancellation mechanism -// but now only it only logs on first use to warn that it's deprecated. -// -// Deprecated: use contexts for cancellation instead. -func (t *Transport) CancelRequest(req *http.Request) { - cancelOnce.Do(func() { - log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts") - }) -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -// cloneRequest returns a clone of the provided *http.Request. 
-// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/time/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/time/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go deleted file mode 100644 index 8f6c7f4..0000000 --- a/vendor/golang.org/x/time/rate/rate.go +++ /dev/null @@ -1,430 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rate provides a rate limiter. -package rate - -import ( - "context" - "fmt" - "math" - "sync" - "time" -) - -// Limit defines the maximum frequency of some events. -// Limit is represented as number of events per second. -// A zero Limit allows no events. -type Limit float64 - -// Inf is the infinite rate limit; it allows all events (even if burst is zero). -const Inf = Limit(math.MaxFloat64) - -// Every converts a minimum time interval between events to a Limit. -func Every(interval time.Duration) Limit { - if interval <= 0 { - return Inf - } - return 1 / Limit(interval.Seconds()) -} - -// A Limiter controls how frequently events are allowed to happen. -// It implements a "token bucket" of size b, initially full and refilled -// at rate r tokens per second. -// Informally, in any large enough time interval, the Limiter limits the -// rate to r tokens per second, with a maximum burst size of b events. -// As a special case, if r == Inf (the infinite rate), b is ignored. -// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. 
-// -// The zero value is a valid Limiter, but it will reject all events. -// Use NewLimiter to create non-zero Limiters. -// -// Limiter has three main methods, Allow, Reserve, and Wait. -// Most callers should use Wait. -// -// Each of the three methods consumes a single token. -// They differ in their behavior when no token is available. -// If no token is available, Allow returns false. -// If no token is available, Reserve returns a reservation for a future token -// and the amount of time the caller must wait before using it. -// If no token is available, Wait blocks until one can be obtained -// or its associated context.Context is canceled. -// -// The methods AllowN, ReserveN, and WaitN consume n tokens. -// -// Limiter is safe for simultaneous use by multiple goroutines. -type Limiter struct { - mu sync.Mutex - limit Limit - burst int - tokens float64 - // last is the last time the limiter's tokens field was updated - last time.Time - // lastEvent is the latest time of a rate-limited event (past or future) - lastEvent time.Time -} - -// Limit returns the maximum overall event rate. -func (lim *Limiter) Limit() Limit { - lim.mu.Lock() - defer lim.mu.Unlock() - return lim.limit -} - -// Burst returns the maximum burst size. Burst is the maximum number of tokens -// that can be consumed in a single call to Allow, Reserve, or Wait, so higher -// Burst values allow more events to happen at once. -// A zero Burst allows no events, unless limit == Inf. -func (lim *Limiter) Burst() int { - lim.mu.Lock() - defer lim.mu.Unlock() - return lim.burst -} - -// TokensAt returns the number of tokens available at time t. -func (lim *Limiter) TokensAt(t time.Time) float64 { - lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim - lim.mu.Unlock() - return tokens -} - -// Tokens returns the number of tokens available now. 
-func (lim *Limiter) Tokens() float64 { - return lim.TokensAt(time.Now()) -} - -// NewLimiter returns a new Limiter that allows events up to rate r and permits -// bursts of at most b tokens. -func NewLimiter(r Limit, b int) *Limiter { - return &Limiter{ - limit: r, - burst: b, - } -} - -// Allow reports whether an event may happen now. -func (lim *Limiter) Allow() bool { - return lim.AllowN(time.Now(), 1) -} - -// AllowN reports whether n events may happen at time t. -// Use this method if you intend to drop / skip events that exceed the rate limit. -// Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(t time.Time, n int) bool { - return lim.reserveN(t, n, 0).ok -} - -// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. -// A Reservation may be canceled, which may enable the Limiter to permit additional events. -type Reservation struct { - ok bool - lim *Limiter - tokens int - timeToAct time.Time - // This is the Limit at reservation time, it can change later. - limit Limit -} - -// OK returns whether the limiter can provide the requested number of tokens -// within the maximum wait time. If OK is false, Delay returns InfDuration, and -// Cancel does nothing. -func (r *Reservation) OK() bool { - return r.ok -} - -// Delay is shorthand for DelayFrom(time.Now()). -func (r *Reservation) Delay() time.Duration { - return r.DelayFrom(time.Now()) -} - -// InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(math.MaxInt64) - -// DelayFrom returns the duration for which the reservation holder must wait -// before taking the reserved action. Zero duration means act immediately. -// InfDuration means the limiter cannot grant the tokens requested in this -// Reservation within the maximum wait time. 
-func (r *Reservation) DelayFrom(t time.Time) time.Duration { - if !r.ok { - return InfDuration - } - delay := r.timeToAct.Sub(t) - if delay < 0 { - return 0 - } - return delay -} - -// Cancel is shorthand for CancelAt(time.Now()). -func (r *Reservation) Cancel() { - r.CancelAt(time.Now()) -} - -// CancelAt indicates that the reservation holder will not perform the reserved action -// and reverses the effects of this Reservation on the rate limit as much as possible, -// considering that other reservations may have already been made. -func (r *Reservation) CancelAt(t time.Time) { - if !r.ok { - return - } - - r.lim.mu.Lock() - defer r.lim.mu.Unlock() - - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { - return - } - - // calculate tokens to restore - // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved - // after r was obtained. These tokens should not be restored. - restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) - if restoreTokens <= 0 { - return - } - // advance time to now - t, tokens := r.lim.advance(t) - // calculate new number of tokens - tokens += restoreTokens - if burst := float64(r.lim.burst); tokens > burst { - tokens = burst - } - // update state - r.lim.last = t - r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { - prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(t) { - r.lim.lastEvent = prevEvent - } - } -} - -// Reserve is shorthand for ReserveN(time.Now(), 1). -func (lim *Limiter) Reserve() *Reservation { - return lim.ReserveN(time.Now(), 1) -} - -// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. -// The Limiter takes this Reservation into account when allowing future events. -// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. 
-// Usage example: -// -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() -// -// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. -// If you need to respect a deadline or cancel the delay, use Wait instead. -// To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { - r := lim.reserveN(t, n, InfDuration) - return &r -} - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.WaitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -// The burst limit is ignored if the rate limit is Inf. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - // The test code calls lim.wait with a fake timer generator. - // This is the real timer generator. - newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { - timer := time.NewTimer(d) - return timer.C, timer.Stop, func() {} - } - - return lim.wait(ctx, n, time.Now(), newTimer) -} - -// wait is the internal implementation of WaitN. 
-func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { - lim.mu.Lock() - burst := lim.burst - limit := lim.limit - lim.mu.Unlock() - - if n > burst && limit != Inf { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) - } - // Check if ctx is already cancelled - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - // Determine wait limit - waitLimit := InfDuration - if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(t) - } - // Reserve - r := lim.reserveN(t, n, waitLimit) - if !r.ok { - return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) - } - // Wait if necessary - delay := r.DelayFrom(t) - if delay == 0 { - return nil - } - ch, stop, advance := newTimer(delay) - defer stop() - advance() // only has an effect when testing - select { - case <-ch: - // We can proceed. - return nil - case <-ctx.Done(): - // Context was canceled before we could proceed. Cancel the - // reservation, which may permit other events to proceed sooner. - r.Cancel() - return ctx.Err() - } -} - -// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). -func (lim *Limiter) SetLimit(newLimit Limit) { - lim.SetLimitAt(time.Now(), newLimit) -} - -// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated -// or underutilized by those which reserved (using Reserve or Wait) but did not yet act -// before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { - lim.mu.Lock() - defer lim.mu.Unlock() - - t, tokens := lim.advance(t) - - lim.last = t - lim.tokens = tokens - lim.limit = newLimit -} - -// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). -func (lim *Limiter) SetBurst(newBurst int) { - lim.SetBurstAt(time.Now(), newBurst) -} - -// SetBurstAt sets a new burst size for the limiter. 
-func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { - lim.mu.Lock() - defer lim.mu.Unlock() - - t, tokens := lim.advance(t) - - lim.last = t - lim.tokens = tokens - lim.burst = newBurst -} - -// reserveN is a helper method for AllowN, ReserveN, and WaitN. -// maxFutureReserve specifies the maximum reservation wait duration allowed. -// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { - lim.mu.Lock() - defer lim.mu.Unlock() - - if lim.limit == Inf { - return Reservation{ - ok: true, - lim: lim, - tokens: n, - timeToAct: t, - } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } - } - - t, tokens := lim.advance(t) - - // Calculate the remaining number of tokens resulting from the request. - tokens -= float64(n) - - // Calculate the wait duration - var waitDuration time.Duration - if tokens < 0 { - waitDuration = lim.limit.durationFromTokens(-tokens) - } - - // Decide result - ok := n <= lim.burst && waitDuration <= maxFutureReserve - - // Prepare reservation - r := Reservation{ - ok: ok, - lim: lim, - limit: lim.limit, - } - if ok { - r.tokens = n - r.timeToAct = t.Add(waitDuration) - - // Update state - lim.last = t - lim.tokens = tokens - lim.lastEvent = r.timeToAct - } - - return r -} - -// advance calculates and returns an updated state for lim resulting from the passage of time. -// lim is not changed. -// advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { - last := lim.last - if t.Before(last) { - last = t - } - - // Calculate the new number of tokens, due to time that passed. 
- elapsed := t.Sub(last) - delta := lim.limit.tokensFromDuration(elapsed) - tokens := lim.tokens + delta - if burst := float64(lim.burst); tokens > burst { - tokens = burst - } - return t, tokens -} - -// durationFromTokens is a unit conversion function from the number of tokens to the duration -// of time it takes to accumulate them at a rate of limit tokens per second. -func (limit Limit) durationFromTokens(tokens float64) time.Duration { - if limit <= 0 { - return InfDuration - } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) -} - -// tokensFromDuration is a unit conversion function from a time duration to the number of tokens -// which could be accumulated during that duration at a rate of limit tokens per second. -func (limit Limit) tokensFromDuration(d time.Duration) float64 { - if limit <= 0 { - return 0 - } - return d.Seconds() * float64(limit) -} diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go deleted file mode 100644 index 6ba99dd..0000000 --- a/vendor/golang.org/x/time/rate/sometimes.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rate - -import ( - "sync" - "time" -) - -// Sometimes will perform an action occasionally. The First, Every, and -// Interval fields govern the behavior of Do, which performs the action. -// A zero Sometimes value will perform an action exactly once. -// -// # Example: logging with rate limiting -// -// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} -// func Spammy() { -// sometimes.Do(func() { log.Info("here I am!") }) -// } -type Sometimes struct { - First int // if non-zero, the first N calls to Do will run f. - Every int // if non-zero, every Nth call to Do will run f. 
- Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. - - mu sync.Mutex - count int // number of Do calls - last time.Time // last time f was run -} - -// Do runs the function f as allowed by First, Every, and Interval. -// -// The model is a union (not intersection) of filters. The first call to Do -// always runs f. Subsequent calls to Do run f if allowed by First or Every or -// Interval. -// -// A non-zero First:N causes the first N Do(f) calls to run f. -// -// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to -// run f. -// -// A non-zero Interval causes Do(f) to run f if Interval has elapsed since -// Do last ran f. -// -// Specifying multiple filters produces the union of these execution streams. -// For example, specifying both First:N and Every:M causes the first N Do(f) -// calls and every Mth Do(f) call, starting with the first, to run f. See -// Examples for more. -// -// If Do is called multiple times simultaneously, the calls will block and run -// serially. Therefore, Do is intended for lightweight operations. -// -// Because a call to Do may block until f returns, if f causes Do to be called, -// it will deadlock. 
-func (s *Sometimes) Do(f func()) { - s.mu.Lock() - defer s.mu.Unlock() - if s.count == 0 || - (s.First > 0 && s.count < s.First) || - (s.Every > 0 && s.count%s.Every == 0) || - (s.Interval > 0 && time.Since(s.last) >= s.Interval) { - f() - s.last = time.Now() - } - s.count++ -} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 7348c50..0000000 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "1.14.x" - - "tip" - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fb..0000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE deleted file mode 100644 index 866d74a..0000000 --- a/vendor/gopkg.in/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index b50c6e8..0000000 --- a/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. 
- -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index acf7140..0000000 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,744 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. 
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -var disableLineWrapping = false - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - if disableLineWrapping { - emitter.best_width = -1 - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. 
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. 
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. 
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. 
-// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. 
-// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. 
-// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 129bc2a..0000000 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,815 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. 
- alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. 
-func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
- return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. 
- text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. - out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - 
return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) 
- return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := 
reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index a1c2cc5..0000000 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. 
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. 
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	switch emitter.state {
	// An unknown state value falls through the empty default to the
	// panic below.
	default:
	case yaml_EMIT_STREAM_START_STATE:
		return yaml_emitter_emit_stream_start(emitter, event)

	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, true)

	case yaml_EMIT_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, false)

	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
		return yaml_emitter_emit_document_content(emitter, event)

	case yaml_EMIT_DOCUMENT_END_STATE:
		return yaml_emitter_emit_document_end(emitter, event)

	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)

	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)

	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, true)

	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, false)

	case yaml_EMIT_END_STATE:
		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
	}
	panic("invalid emitter state")
}

// Expect STREAM-START. Resolves the output encoding, clamps the formatting
// knobs to sane values, resets the output position, and moves to the
// first-document state.
func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_STREAM_START_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
	}
	// ANY encoding resolves to the event's encoding, falling back to UTF-8.
	if emitter.encoding == yaml_ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == yaml_ANY_ENCODING {
			emitter.encoding = yaml_UTF8_ENCODING
		}
	}
	// Indent must be 2..9; width defaults to 80, or effectively unlimited
	// when negative.
	if emitter.best_indent < 2 || emitter.best_indent > 9 {
		emitter.best_indent = 2
	}
	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		emitter.best_width = 1<<31 - 1
	}
	if emitter.line_break == yaml_ANY_BREAK {
		emitter.line_break = yaml_LN_BREAK
	}

	// Reset the writing position to the start of an (as yet unindented)
	// stream.
	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true

	// Non-UTF-8 outputs start with a byte order mark.
	if emitter.encoding != yaml_UTF8_ENCODING {
		if !yaml_emitter_write_bom(emitter) {
			return false
		}
	}
	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
	return true
}

// Expect DOCUMENT-START or STREAM-END.
func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {

	if event.typ == yaml_DOCUMENT_START_EVENT {

		if event.version_directive != nil {
			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
				return false
			}
		}

		// Validate and register the event's %TAG directives; duplicates
		// are an error here.
		for i := 0; i < len(event.tag_directives); i++ {
			tag_directive := &event.tag_directives[i]
			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
				return false
			}
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
				return false
			}
		}

		// The default directives ("!" and "!!") are always available;
		// duplicates are silently allowed.
		for i := 0; i < len(default_tag_directives); i++ {
			tag_directive := &default_tag_directives[i]
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
				return false
			}
		}

		// Only the first document of a non-canonical stream may omit the
		// "---" marker.
		implicit := event.implicit
		if !first || emitter.canonical {
			implicit = false
		}

		// A previous open-ended document must be closed with "..." before
		// any directives can be written.
		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		// Note: the version is emitted as the literal "1.1" regardless of
		// the directive's contents; analyze_version_directive above has
		// already rejected anything other than 1.1.
		if event.version_directive != nil {
			implicit = false
			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if len(event.tag_directives) > 0 {
			implicit = false
			for i := 0; i < len(event.tag_directives); i++ {
				tag_directive := &event.tag_directives[i]
				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
					return false
				}
				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
					return false
				}
				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
					return false
				}
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		if yaml_emitter_check_empty_document(emitter) {
			implicit = false
		}
		if !implicit {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
				return false
			}
			if emitter.canonical {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
		return true
	}

	if event.typ == yaml_STREAM_END_EVENT {
		// Close a dangling open-ended document before ending the stream.
		if emitter.open_ended {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_flush(emitter) {
			return false
		}
		emitter.state = yaml_EMIT_END_STATE
		return true
	}

	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
}

// Expect the root node. Queues the DOCUMENT-END state and emits the node
// in root context.
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
}

// Expect DOCUMENT-END. Writes an explicit "..." terminator when requested,
// flushes the output, and clears the per-document tag directives.
func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_DOCUMENT_END_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !event.implicit {
		// [Go] Allocate the slice elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
			return false
		}
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	if !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
	// Tag directives are scoped to a single document.
	emitter.tag_directives = emitter.tag_directives[:0]
	return true
}

// Expect a flow item node.
func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	// On the first item, open the collection: write '[', push the indent,
	// and enter one more flow level.
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}

	if event.typ == yaml_SEQUENCE_END_EVENT {
		// Close the collection: pop the indent and the saved state.
		emitter.flow_level--
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		// Canonical output puts a trailing comma and break before ']'.
		if emitter.canonical && !first {
			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
			return false
		}
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]

		return true
	}

	// Separate items with a comma.
	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
			return false
		}
	}

	// Break the line in canonical mode or when past the preferred width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}

// Expect a flow key node.
func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	// On the first key, open the mapping: write '{', push the indent, and
	// enter one more flow level.
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}

	if event.typ == yaml_MAPPING_END_EVENT {
		// Close the mapping: pop the indent and the saved state.
		emitter.flow_level--
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
			return false
		}
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}

	// Separate entries with a comma.
	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
			return false
		}
	}
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}

	// Short keys are written inline ("key: value"); otherwise fall back to
	// the explicit "? key" form. Canonical output always uses the latter.
	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a flow value node.
func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
	if simple {
		// Simple keys: the ':' follows the key directly.
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
			return false
		}
	} else {
		// Explicit "? key" form: the ':' may start a fresh line.
		if emitter.canonical || emitter.column > emitter.best_width {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a block item node.
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		// A sequence nested directly under a mapping key line is emitted
		// "indentless" (the '-' aligns with the key).
		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
			return false
		}
	}
	if event.typ == yaml_SEQUENCE_END_EVENT {
		// Close the sequence: pop the indent and the saved state.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}

// Expect a block key node.
func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false, false) {
			return false
		}
	}
	if event.typ == yaml_MAPPING_END_EVENT {
		// Close the mapping: pop the indent and the saved state.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	// Short keys are written inline; long or complex keys use the explicit
	// "? key" form.
	if yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a block value node. For a simple key the ':' follows the key on
// the same line; for an explicit key it starts a new indented line.
func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
	if simple {
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
			return false
		}
	} else {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a node.
func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
	root bool, sequence bool, mapping bool, simple_key bool) bool {

	// Record the syntactic context the node is emitted in; the scalar and
	// collection emitters consult these flags.
	emitter.root_context = root
	emitter.sequence_context = sequence
	emitter.mapping_context = mapping
	emitter.simple_key_context = simple_key

	switch event.typ {
	case yaml_ALIAS_EVENT:
		return yaml_emitter_emit_alias(emitter, event)
	case yaml_SCALAR_EVENT:
		return yaml_emitter_emit_scalar(emitter, event)
	case yaml_SEQUENCE_START_EVENT:
		return yaml_emitter_emit_sequence_start(emitter, event)
	case yaml_MAPPING_START_EVENT:
		return yaml_emitter_emit_mapping_start(emitter, event)
	default:
		return yaml_emitter_set_emitter_error(emitter,
			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
	}
}

// Expect ALIAS. The anchor data was prepared by yaml_emitter_analyze_event;
// writing "*anchor" is all that remains.
func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}

// Expect SCALAR. Emits anchor, tag, and value in order; the indent pushed
// before the value is popped immediately after it is written.
func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_select_scalar_style(emitter, event) {
		return false
	}
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if !yaml_emitter_increase_indent(emitter, true, false) {
		return false
	}
	if !yaml_emitter_process_scalar(emitter) {
		return false
	}
	emitter.indent = emitter.indents[len(emitter.indents)-1]
	emitter.indents = emitter.indents[:len(emitter.indents)-1]
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}

// Expect SEQUENCE-START.
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. 
func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
	// Total rendered length of anchor + tag + value; the analyzers have
	// already populated anchor_data/tag_data/scalar_data for this event.
	length := 0
	switch emitter.events[emitter.events_head].typ {
	case yaml_ALIAS_EVENT:
		length += len(emitter.anchor_data.anchor)
	case yaml_SCALAR_EVENT:
		// A multiline scalar can never be a simple key.
		if emitter.scalar_data.multiline {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix) +
			len(emitter.scalar_data.value)
	case yaml_SEQUENCE_START_EVENT:
		// Only an empty sequence qualifies.
		if !yaml_emitter_check_empty_sequence(emitter) {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	case yaml_MAPPING_START_EVENT:
		// Only an empty mapping qualifies.
		if !yaml_emitter_check_empty_mapping(emitter) {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	default:
		return false
	}
	// Simple keys are limited to 128 characters.
	return length <= 128
}

// Determine an acceptable scalar style. The requested style is downgraded
// or upgraded step by step until one that can represent the value is found;
// double-quoted can always represent anything.
func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {

	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
	if no_tag && !event.implicit && !event.quoted_implicit {
		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
	}

	style := event.scalar_style()
	if style == yaml_ANY_SCALAR_STYLE {
		style = yaml_PLAIN_SCALAR_STYLE
	}
	if emitter.canonical {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	if emitter.simple_key_context && emitter.scalar_data.multiline {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}

	if style == yaml_PLAIN_SCALAR_STYLE {
		// Plain is rejected when the analyzer disallows it in the current
		// context, when the value is empty in flow/simple-key position,
		// or when plain would be resolved to a different implicit type.
		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		if no_tag && !event.implicit {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
		if !emitter.scalar_data.single_quoted_allowed {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
		// Block scalars are impossible inside flow collections and as
		// simple keys.
		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}

	// A quoted-implicit value without an explicit tag needs the "!"
	// non-specific tag when it ends up in a non-plain style.
	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
		emitter.tag_data.handle = []byte{'!'}
	}
	emitter.scalar_data.style = style
	return true
}

// Write an anchor ("&name") or alias ("*name"); a no-op when the event
// carries no anchor.
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
	if emitter.anchor_data.anchor == nil {
		return true
	}
	c := []byte{'&'}
	if emitter.anchor_data.alias {
		c[0] = '*'
	}
	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
		return false
	}
	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
}

// Write a tag, either as "handle" + "suffix" or in verbatim "!<...>" form
// when no handle applies; a no-op when the event carries no tag.
func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
		return true
	}
	if len(emitter.tag_data.handle) > 0 {
		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
			return false
		}
		if len(emitter.tag_data.suffix) > 0 {
			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
				return false
			}
		}
	} else {
		// [Go] Allocate these slices elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
			return false
		}
		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
			return false
		}
	}
	return true
}

// Write a scalar.
func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
	// Dispatch on the style chosen by yaml_emitter_select_scalar_style.
	switch emitter.scalar_data.style {
	case yaml_PLAIN_SCALAR_STYLE:
		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)

	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)

	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)

	case yaml_LITERAL_SCALAR_STYLE:
		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)

	case yaml_FOLDED_SCALAR_STYLE:
		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
	}
	panic("unknown scalar style")
}

// Check if a %YAML directive is valid. Only version 1.1 is supported.
func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
	if version_directive.major != 1 || version_directive.minor != 1 {
		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
	}
	return true
}

// Check if a %TAG directive is valid: the handle must be "!...!" with only
// alphanumerical characters between the bangs, and the prefix non-empty.
func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
	handle := tag_directive.handle
	prefix := tag_directive.prefix
	if len(handle) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
	}
	if handle[0] != '!' {
		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
	}
	if handle[len(handle)-1] != '!' {
		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
	}
	// Step by UTF-8 rune width over the interior of the handle.
	for i := 1; i < len(handle)-1; i += width(handle[i]) {
		if !is_alpha(handle, i) {
			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
		}
	}
	if len(prefix) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
	}
	return true
}

// Check if an anchor (or alias, selected by the alias flag) is valid:
// non-empty and alphanumerical. On success the anchor data is stashed on
// the emitter for process_anchor to write.
func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
	if len(anchor) == 0 {
		problem := "anchor value must not be empty"
		if alias {
			problem = "alias value must not be empty"
		}
		return yaml_emitter_set_emitter_error(emitter, problem)
	}
	for i := 0; i < len(anchor); i += width(anchor[i]) {
		if !is_alpha(anchor, i) {
			problem := "anchor value must contain alphanumerical characters only"
			if alias {
				problem = "alias value must contain alphanumerical characters only"
			}
			return yaml_emitter_set_emitter_error(emitter, problem)
		}
	}
	emitter.anchor_data.anchor = anchor
	emitter.anchor_data.alias = alias
	return true
}

// Check if a tag is valid and split it into handle + suffix using the first
// registered %TAG directive whose prefix matches; otherwise the whole tag
// becomes the suffix (emitted verbatim as "!<...>" by process_tag).
func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
	if len(tag) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
	}
	for i := 0; i < len(emitter.tag_directives); i++ {
		tag_directive := &emitter.tag_directives[i]
		if bytes.HasPrefix(tag, tag_directive.prefix) {
			emitter.tag_data.handle = tag_directive.handle
			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
			return true
		}
	}
	emitter.tag_data.suffix = tag
	return true
}

// Check if a scalar is valid.
// Single pass over the scalar value that classifies it (indicators, breaks,
// special characters, leading/trailing whitespace) and records which scalar
// styles can represent it in emitter.scalar_data.
func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
	var (
		block_indicators   = false
		flow_indicators    = false
		line_breaks        = false
		special_characters = false

		leading_space  = false
		leading_break  = false
		trailing_space = false
		trailing_break = false
		break_space    = false
		space_break    = false

		preceded_by_whitespace = false
		followed_by_whitespace = false
		previous_space         = false
		previous_break         = false
	)

	emitter.scalar_data.value = value

	// The empty scalar cannot be plain in general nor use block styles.
	if len(value) == 0 {
		emitter.scalar_data.multiline = false
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = true
		emitter.scalar_data.single_quoted_allowed = true
		emitter.scalar_data.block_allowed = false
		return true
	}

	// A value starting with "---" or "..." would be mistaken for a
	// document marker if emitted plain.
	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
		block_indicators = true
		flow_indicators = true
	}

	preceded_by_whitespace = true
	for i, w := 0, 0; i < len(value); i += w {
		w = width(value[i])
		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)

		if i == 0 {
			// Characters that are indicators only at the start of a
			// scalar, or only when followed by whitespace.
			switch value[i] {
			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
				flow_indicators = true
				block_indicators = true
			case '?', ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '-':
				if followed_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		} else {
			// Characters that act as indicators mid-scalar.
			switch value[i] {
			case ',', '?', '[', ']', '{', '}':
				flow_indicators = true
			case ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '#':
				if preceded_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		}

		// Non-printable, or non-ASCII with unicode output disabled, forces
		// double quoting with escapes.
		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
			special_characters = true
		}
		// Track space/break adjacency and leading/trailing positions.
		if is_space(value, i) {
			if i == 0 {
				leading_space = true
			}
			if i+width(value[i]) == len(value) {
				trailing_space = true
			}
			if previous_break {
				break_space = true
			}
			previous_space = true
			previous_break = false
		} else if is_break(value, i) {
			line_breaks = true
			if i == 0 {
				leading_break = true
			}
			if i+width(value[i]) == len(value) {
				trailing_break = true
			}
			if previous_space {
				space_break = true
			}
			previous_space = false
			previous_break = true
		} else {
			previous_space = false
			previous_break = false
		}

		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
		preceded_by_whitespace = is_blankz(value, i)
	}

	// Translate the collected facts into per-style permissions.
	emitter.scalar_data.multiline = line_breaks
	emitter.scalar_data.flow_plain_allowed = true
	emitter.scalar_data.block_plain_allowed = true
	emitter.scalar_data.single_quoted_allowed = true
	emitter.scalar_data.block_allowed = true

	if leading_space || leading_break || trailing_space || trailing_break {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if trailing_space {
		emitter.scalar_data.block_allowed = false
	}
	if break_space {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || special_characters {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
		emitter.scalar_data.block_allowed = false
	}
	if line_breaks {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if flow_indicators {
		emitter.scalar_data.flow_plain_allowed = false
	}
	if block_indicators {
		emitter.scalar_data.block_plain_allowed = false
	}
	return true
}

// Check if the event data is valid.
// Validates the event's anchor/tag/value and caches the analyzed data on
// the emitter for the emit_* and process_* functions to consume.
func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {

	// Clear the previous event's cached analysis.
	emitter.anchor_data.anchor = nil
	emitter.tag_data.handle = nil
	emitter.tag_data.suffix = nil
	emitter.scalar_data.value = nil

	switch event.typ {
	case yaml_ALIAS_EVENT:
		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
			return false
		}

	case yaml_SCALAR_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
				return false
			}
		}
		// A scalar tag is only written in canonical mode or when the
		// value is implicit in neither the plain nor the quoted sense.
		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}
		if !yaml_emitter_analyze_scalar(emitter, event.value) {
			return false
		}

	case yaml_SEQUENCE_START_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
				return false
			}
		}
		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}

	case yaml_MAPPING_START_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
				return false
			}
		}
		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}
	}
	return true
}

// Write the BOM character.
func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
	// flush() guarantees room in the buffer for the 3-byte UTF-8 BOM.
	if !flush(emitter) {
		return false
	}
	pos := emitter.buffer_pos
	emitter.buffer[pos+0] = '\xEF'
	emitter.buffer[pos+1] = '\xBB'
	emitter.buffer[pos+2] = '\xBF'
	emitter.buffer_pos += 3
	return true
}

// Break the line if needed and pad with spaces up to the current indent.
// Leaves the emitter positioned at a fresh indentation point.
func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
	indent := emitter.indent
	if indent < 0 {
		indent = 0
	}
	// Only emit a break when we are not already at a clean indent point.
	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
		if !put_break(emitter) {
			return false
		}
	}
	for emitter.column < indent {
		if !put(emitter, ' ') {
			return false
		}
	}
	emitter.whitespace = true
	emitter.indention = true
	return true
}

// Write an indicator token (e.g. "-", ":", "[", "---"), inserting a
// separating space when required, and update the whitespace/indention
// flags accordingly.
func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
	if need_whitespace && !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}
	if !write_all(emitter, indicator) {
		return false
	}
	emitter.whitespace = is_whitespace
	emitter.indention = (emitter.indention && is_indention)
	emitter.open_ended = false
	return true
}

// Write an anchor/alias name (the '&'/'*' indicator is written separately).
func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
	if !write_all(emitter, value) {
		return false
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}

// Write a tag handle, preceded by a space when not already on whitespace.
func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
	if !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}
	if !write_all(emitter, value) {
		return false
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}

// Write a tag suffix or prefix, URI-escaping every byte that is neither
// alphanumerical nor in the allowed punctuation set.
func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
	if need_whitespace && !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}
	for i := 0; i < len(value); {
		var must_write bool
		switch value[i] {
		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(',
			')', '[', ']':
			must_write = true
		default:
			must_write = is_alpha(value, i)
		}
		if must_write {
			if !write(emitter, value, &i) {
				return false
			}
		} else {
			// Percent-encode each byte of the (possibly multi-byte) rune
			// as "%XY" with uppercase hex digits.
			w := width(value[i])
			for k := 0; k < w; k++ {
				octet := value[i]
				i++
				if !put(emitter, '%') {
					return false
				}

				c := octet >> 4
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if !put(emitter, c) {
					return false
				}

				c = octet & 0x0f
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if !put(emitter, c) {
					return false
				}
			}
		}
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}

// Write a plain (unquoted) scalar, folding long lines at spaces when
// allow_breaks permits.
func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
	if !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}

	spaces := false
	breaks := false
	for i := 0; i < len(value); {
		if is_space(value, i) {
			// Fold a single space into a line break when past the
			// preferred width (never two in a row, never before another
			// space).
			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else if is_break(value, i) {
			// A lone '\n' is doubled so folding preserves it on reload.
			if !breaks && value[i] == '\n' {
				if !put_break(emitter) {
					return false
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}

	emitter.whitespace = false
	emitter.indention = false
	// A plain scalar at the document root leaves the document open-ended:
	// a following document needs an explicit "..." separator.
	if emitter.root_context {
		emitter.open_ended = true
	}

	return true
}

// Write a single-quoted scalar; quotes are escaped by doubling ('').
// NOTE(review): this definition continues past the end of this chunk; the
// partial header below is preserved token-for-token.
func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {

	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
		return false
	}

	spaces := false
	breaks := false
	for i := 0; i <
len(value); {
		if is_space(value, i) {
			// Fold a single interior space into a line break when past the
			// preferred width; leading/trailing spaces are written as-is.
			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else if is_break(value, i) {
			// A lone '\n' is doubled so folding preserves it on reload.
			if !breaks && value[i] == '\n' {
				if !put_break(emitter) {
					return false
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			// Single quotes are escaped by doubling them.
			if value[i] == '\'' {
				if !put(emitter, '\'') {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
		return false
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}

// Write a double-quoted scalar, escaping everything that cannot appear
// verbatim: non-printables, breaks, BOMs, '"', '\', and (when unicode
// output is disabled) all non-ASCII runes.
func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
	spaces := false
	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
		return false
	}

	for i := 0; i < len(value); {
		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
			is_bom(value, i) || is_break(value, i) ||
			value[i] == '"' || value[i] == '\\' {

			octet := value[i]

			// Decode the UTF-8 rune by hand: leading byte selects the
			// width, continuation bytes contribute 6 bits each.
			var w int
			var v rune
			switch {
			case octet&0x80 == 0x00:
				w, v = 1, rune(octet&0x7F)
			case octet&0xE0 == 0xC0:
				w, v = 2, rune(octet&0x1F)
			case octet&0xF0 == 0xE0:
				w, v = 3, rune(octet&0x0F)
			case octet&0xF8 == 0xF0:
				w, v = 4, rune(octet&0x07)
			}
			for k := 1; k < w; k++ {
				octet = value[i+k]
				v = (v << 6) + (rune(octet) & 0x3F)
			}
			i += w

			if !put(emitter, '\\') {
				return false
			}

			// Use the named YAML escape when one exists; otherwise fall
			// back to \xXX, \uXXXX, or \UXXXXXXXX by code point size.
			var ok bool
			switch v {
			case 0x00:
				ok = put(emitter, '0')
			case 0x07:
				ok = put(emitter, 'a')
			case 0x08:
				ok = put(emitter, 'b')
			case 0x09:
				ok = put(emitter, 't')
			case 0x0A:
				ok = put(emitter, 'n')
			case 0x0b:
				ok = put(emitter, 'v')
			case 0x0c:
				ok = put(emitter, 'f')
			case 0x0d:
				ok = put(emitter, 'r')
			case 0x1b:
				ok = put(emitter, 'e')
			case 0x22:
				ok = put(emitter, '"')
			case 0x5c:
				ok = put(emitter, '\\')
			case 0x85:
				ok = put(emitter, 'N')
			case 0xA0:
				ok = put(emitter, '_')
			case 0x2028:
				ok = put(emitter, 'L')
			case 0x2029:
				ok = put(emitter, 'P')
			default:
				if v <= 0xFF {
					ok = put(emitter, 'x')
					w = 2
				} else if v <= 0xFFFF {
					ok = put(emitter, 'u')
					w = 4
				} else {
					ok = put(emitter, 'U')
					w = 8
				}
				// w is reused here as the number of hex digits to emit.
				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
					digit := byte((v >> uint(k)) & 0x0F)
					if digit < 10 {
						ok = put(emitter, digit+'0')
					} else {
						ok = put(emitter, digit+'A'-10)
					}
				}
			}
			if !ok {
				return false
			}
			spaces = false
		} else if is_space(value, i) {
			// Fold at an interior space when past the preferred width; a
			// following space must be protected with a trailing '\'.
			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				if is_space(value, i+1) {
					if !put(emitter, '\\') {
						return false
					}
				}
				i += width(value[i])
			} else if !write(emitter, value, &i) {
				return false
			}
			spaces = true
		} else {
			if !write(emitter, value, &i) {
				return false
			}
			spaces = false
		}
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
		return false
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}

// Write the indentation and chomping hints for a block scalar header.
// NOTE(review): this definition runs past the end of this chunk; its tail
// is preserved token-for-token below.
func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
	// An explicit indent hint is needed when the content starts with
	// whitespace (the parser could not infer the indent otherwise).
	if is_space(value, 0) || is_break(value, 0) {
		indent_hint := []byte{'0' + byte(emitter.best_indent)}
		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
			return false
		}
	}

	emitter.open_ended = false

	var chomp_hint [1]byte
	if len(value) == 0 {
		chomp_hint[0] = '-'
	} else {
		// Walk back over UTF-8 continuation bytes to the last rune.
		i := len(value) - 1
		for value[i]&0xC0 == 0x80 {
			i--
		}
		if
!is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if 
!yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e..0000000 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. 
- doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements 
TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. - case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e 
*encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. 
-func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. 
- switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 81d05df..0000000 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START 
implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). 
-func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. 
- if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else 
{ - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. 
- end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, 
start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = 
parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
- } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = 
parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. 
-func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. 
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index 7c1f5fa..0000000 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. 
- buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. 
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. 
- inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. 
- for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. 
- if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. 
- for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 4120e0c..0000000 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less 
garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. 
- // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 0b9bb60..0000000 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2711 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. 
-// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... 
-// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. 
We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. 
If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. 
-func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. 
- if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? 
- if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? 
- // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. 
- return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. 
- // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. 
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. 
- if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. 
- yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. 
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. 
- if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. 
- *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. 
- if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. 
- length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. 
- */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. 
- *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. 
- if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. 
- for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. 
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. 
- switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' 
&& - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. 
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 4c45e66..0000000 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. 
-func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde60..0000000 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. 
- if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index 3081388..0000000 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. 
-// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decoder reads and decodes YAML values from an input stream. -type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. 
-// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. 
Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. 
-// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. 
- InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in 
struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -// FutureLineWrap globally disables line wrapping when encoding long strings. -// This is a temporary and thus deprecated method introduced to faciliate -// migration towards v3, which offers more control of line lengths on -// individual encodings, and has a default matching the behavior introduced -// by this function. -// -// The default formatting of v2 was erroneously changed in v2.3.0 and reverted -// in v2.4.0, at which point this function was introduced to help migration. -func FutureLineWrap() { - disableLineWrapping = true -} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index f6a9c8e..0000000 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. 
-} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. 
- yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
- value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). 
- version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. 
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. 
- version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. 
-) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. 
- tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. 
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. 
- unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? 
- - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3..0000000 --- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. 
-func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. 
-func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. 
- if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/modules.txt b/vendor/modules.txt deleted file mode 100644 index ca9c78e..0000000 --- a/vendor/modules.txt +++ /dev/null @@ -1,35 +0,0 @@ -# github.com/digitalocean/godo v1.119.0 -## explicit; go 1.20 -github.com/digitalocean/godo -github.com/digitalocean/godo/metrics -# github.com/golang/mock v1.6.0 -## explicit; go 1.11 -github.com/golang/mock/gomock -# github.com/google/go-querystring v1.1.0 -## explicit; go 1.10 -github.com/google/go-querystring/query -# github.com/hashicorp/go-cleanhttp v0.5.2 -## explicit; go 1.13 -github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-retryablehttp v0.7.7 -## explicit; go 1.19 -github.com/hashicorp/go-retryablehttp -# github.com/pkg/errors v0.9.1 -## explicit -github.com/pkg/errors -# golang.org/x/oauth2 v0.21.0 -## explicit; go 1.18 -golang.org/x/oauth2 -golang.org/x/oauth2/internal -# golang.org/x/sys v0.22.0 -## explicit; go 1.18 -# golang.org/x/time v0.5.0 -## explicit; go 1.18 -golang.org/x/time/rate -# gopkg.in/yaml.v2 v2.4.0 -## explicit; go 1.15 -gopkg.in/yaml.v2 -# sigs.k8s.io/yaml v1.4.0 -## explicit; go 1.12 -sigs.k8s.io/yaml -sigs.k8s.io/yaml/goyaml.v2 diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore deleted file mode 100644 index 2dc9290..0000000 --- a/vendor/sigs.k8s.io/yaml/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Idea files -.idea/** -.idea/ - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml deleted file mode 100644 index 54ed8f9..0000000 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ /dev/null @@ 
-1,12 +0,0 @@ -language: go -arch: arm64 -dist: focal -go: 1.15.x -script: - - diff -u <(echo -n) <(gofmt -d *.go) - - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - GO111MODULE=on go vet . - - GO111MODULE=on go test -v -race ./... - - git diff --exit-code -install: - - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md deleted file mode 100644 index de47115..0000000 --- a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing Guidelines - -Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: - -_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ - -## Getting Started - -We have full documentation on how to get started contributing here: - - - -- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests -- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) -- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers - -## Mentorship - -- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
- - diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE deleted file mode 100644 index 093d6d3..0000000 --- a/vendor/sigs.k8s.io/yaml/LICENSE +++ /dev/null @@ -1,306 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# The forked go-yaml.v3 library under this project is covered by two -different licenses (MIT and Apache): - -#### MIT License #### - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original MIT license, with the additional -copyright staring in 2011 when the project was ported over: - - apic.go emitterc.go parserc.go readerc.go scannerc.go - writerc.go yamlh.go yamlprivateh.go - -Copyright (c) 2006-2010 Kirill Simonov -Copyright (c) 2006-2011 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the 
Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### Apache License ### - -All the remaining project files are covered by the Apache license: - -Copyright (c) 2011-2019 Canonical Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -# The forked go-yaml.v2 library under the project is covered by an -Apache license: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS deleted file mode 100644 index 003a149..0000000 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ /dev/null @@ -1,23 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dims -- jpbetz -- smarterclayton -- deads2k -- sttts -- liggitt -reviewers: -- dims -- thockin -- jpbetz -- smarterclayton -- wojtek-t -- deads2k -- derekwaynecarr -- mikedanese -- liggitt -- sttts -- tallclair -labels: -- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md deleted file mode 100644 index e81cc42..0000000 --- a/vendor/sigs.k8s.io/yaml/README.md +++ /dev/null @@ -1,123 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml) - -kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml). - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. 
This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get sigs.k8s.io/yaml -``` - -And import using: - -``` -import "sigs.k8s.io/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "sigs.k8s.io/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. 
- p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. - var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "sigs.k8s.io/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md deleted file mode 100644 index 6b64246..0000000 --- a/vendor/sigs.k8s.io/yaml/RELEASE.md +++ /dev/null @@ -1,9 +0,0 @@ -# Release Process - -The `yaml` Project is released on an as-needed basis. The process is as follows: - -1. An issue is proposing a new release with a changelog since the last release -1. All [OWNERS](OWNERS) must LGTM this release -1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` -1. The release issue is closed -1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS deleted file mode 100644 index 0648a8e..0000000 --- a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS +++ /dev/null @@ -1,17 +0,0 @@ -# Defined below are the security contacts for this repo. 
-# -# They are the contact point for the Product Security Team to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. -# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ - -cjcullen -jessfraz -liggitt -philips -tallclair diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md deleted file mode 100644 index 0d15c00..0000000 --- a/vendor/sigs.k8s.io/yaml/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go deleted file mode 100644 index 0ea28bd..0000000 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down 'value' allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If 'value' is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. 
- if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() { - value = value.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if value.Kind() == reflect.Interface && !value.IsNil() { - element := value.Elem() - if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) { - value = element - continue - } - } - - if value.Kind() != reflect.Ptr { - break - } - - if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() { - break - } - if value.IsNil() { - if value.CanSet() { - value.Set(reflect.New(value.Type().Elem())) - } else { - value = reflect.New(value.Type().Elem()) - } - } - if value.Type().NumMethod() > 0 { - if u, ok := value.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := value.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - value = value.Elem() - } - return nil, nil, value -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. 
-type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - var count map[reflect.Type]int - var nextCount map[reflect.Type]int - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. 
- for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. 
- fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
-func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// - S maps to s and to U+017F 'ſ' Latin small letter long s -// - k maps to K and to U+212A 'K' Kelvin sign -// -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. 
-func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - - return len(t) <= 0 -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. 
-func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fb..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE deleted file mode 100644 index 866d74a..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS deleted file mode 100644 index 73be0a3..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS +++ /dev/null @@ -1,24 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dims -- jpbetz -- smarterclayton -- deads2k -- sttts -- liggitt -- natasha41575 -- knverey -reviewers: -- dims -- thockin -- jpbetz -- smarterclayton -- deads2k -- derekwaynecarr -- mikedanese -- liggitt -- sttts -- tallclair -labels: -- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md deleted file mode 100644 index 53f4139..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md +++ /dev/null @@ -1,143 +0,0 @@ -# go-yaml fork - -This package is a fork of the go-yaml library and is intended solely for consumption -by kubernetes projects. In this fork, we plan to support only critical changes required for -kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests -should be made in the upstream go-yaml library, and we will reject such changes in this fork -unless we are pulling them from upstream. - -This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 - -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. 
It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. 
-type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go deleted file mode 100644 index acf7140..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go +++ /dev/null @@ -1,744 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. 
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. 
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -var disableLineWrapping = false - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - if disableLineWrapping { - emitter.best_width = -1 - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. 
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. 
-// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. 
-// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. 
-func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. 
-// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. 
-// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go deleted file mode 100644 index 129bc2a..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go +++ /dev/null @@ -1,815 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. 
-func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
- return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. 
- text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. - out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - 
return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) 
- return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := 
reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go deleted file mode 100644 index a1c2cc5..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. 
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. 
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if 
yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. 
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. 
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { 
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' 
{ - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. 
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if 
i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < 
len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = 
put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if 
!is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if 
!yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go deleted file mode 100644 index 0ee738e..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. 
- doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements 
TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. - case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e 
*encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. 
-func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. 
- switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go deleted file mode 100644 index 81d05df..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// 
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). 
-func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. 
- if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else 
{ - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. 
- end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, 
start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = 
parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
- } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = 
parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. 
-func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. 
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go deleted file mode 100644 index 7c1f5fa..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. 
- buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. 
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. 
- inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. 
- for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. 
- if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. 
- for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go deleted file mode 100644 index 4120e0c..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made 
faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. 
- // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go deleted file mode 100644 index 0b9bb60..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go +++ /dev/null @@ -1,2711 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. 
-// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... 
-// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. 
We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. 
If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. 
-func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. 
- if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? 
- if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? 
- // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. 
- return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. 
- // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. 
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. 
- if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. 
- yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. 
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. 
- if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. 
- *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. 
- if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. 
- length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. 
- */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. 
- *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. 
- if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. 
- for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. 
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. 
- switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' 
&& - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. 
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go deleted file mode 100644 index 4c45e66..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. 
-func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go deleted file mode 100644 index a2dde60..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. 
- if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go deleted file mode 100644 index 3081388..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. 
-// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decoder reads and decodes YAML values from an input stream. -type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. 
-// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. 
Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. 
-// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. 
- InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in 
struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -// FutureLineWrap globally disables line wrapping when encoding long strings. -// This is a temporary and thus deprecated method introduced to faciliate -// migration towards v3, which offers more control of line lengths on -// individual encodings, and has a default matching the behavior introduced -// by this function. -// -// The default formatting of v2 was erroneously changed in v2.3.0 and reverted -// in v2.4.0, at which point this function was introduced to help migration. -func FutureLineWrap() { - disableLineWrapping = true -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go deleted file mode 100644 index f6a9c8e..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. 
- minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. 
- yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
- value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). 
- version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. 
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. 
- version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. 
-) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. 
- tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. 
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. 
- unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? 
- - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3..0000000 --- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. 
-func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. 
-func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. 
- if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go deleted file mode 100644 index fc10246..0000000 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ /dev/null @@ -1,419 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - "strconv" - - "sigs.k8s.io/yaml/goyaml.v2" -) - -// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) -func Marshal(obj interface{}) ([]byte, error) { - jsonBytes, err := json.Marshal(obj) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %w", err) - } - - return JSONToYAML(jsonBytes) -} - -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the -// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer. 
-// -// Important notes about the Unmarshal logic: -// -// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users. -// - This decodes any number (although it is an integer) into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers above +/- 2^53 will lose precision when round-tripping. Make a JSONOpt that calls d.UseNumber() to avoid this. -// - Duplicate fields, including in-case-sensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See UnmarshalStrict for an alternative. -// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override. -// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. -// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process. -// - There are no compatibility guarantees for returned error values. -func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { - return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...) -} - -// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions: -// -// - Duplicate fields in an object yield an error. This is according to the YAML specification. -// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error. -func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { - return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...) 
-} - -// unmarshal unmarshals the given YAML byte stream into the given interface, -// optionally performing the unmarshalling strictly -func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error { - jsonTarget := reflect.ValueOf(obj) - - jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %w", err) - } - - err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %w", err) - } - - return nil -} - -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. -func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(reader) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&obj); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - -// JSONToYAML converts JSON to YAML. Notable implementation details: -// -// - Duplicate fields, are case-sensitively ignored in an undefined order. -// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name. -// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. 
go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - yamlBytes, err := yaml.Marshal(jsonObj) - if err != nil { - return nil, err - } - - return yamlBytes, nil -} - -// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. -// -// Some things YAML can do that are not supported by JSON: -// - In YAML you can have binary and null keys in your maps. These are invalid -// in JSON, and therefore int, bool and float keys are converted to strings implicitly. -// - Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -// - And more... read the YAML specification for more details. -// -// Notable about the implementation: -// -// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See YAMLToJSONStrict for an alternative. -// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. -// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. -// - There are no compatibility guarantees for returned error values. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSONTarget(y, nil, yaml.Unmarshal) -} - -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. 
-func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict) -} - -func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := unmarshalFn(yamlBytes, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - jsonBytes, err := json.Marshal(jsonObj) - if err != nil { - return nil, err - } - return jsonBytes, nil -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if jsonUnmarshaler != nil || textUnmarshaler != nil { - jsonTarget = nil - } else { - jsonTarget = &pointerValue - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. 
- switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. 
- var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. 
- if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } -} - -// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice, -// without going through a byte representation. A nil or empty map[string]interface{} input is -// converted to an empty map, i.e. yaml.MapSlice(nil). -// -// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice. -// -// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml: -// - float64s are down-casted as far as possible without data-loss to int, int64, uint64. -// - int64s are down-casted to int if possible without data-loss. -// -// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case. -// -// string, bool and any other types are unchanged. 
-func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice { - if len(j) == 0 { - return nil - } - ret := make(yaml.MapSlice, 0, len(j)) - for k, v := range j { - ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)}) - } - return ret -} - -func jsonToYAMLValue(j interface{}) interface{} { - switch j := j.(type) { - case map[string]interface{}: - if j == nil { - return interface{}(nil) - } - return JSONObjectToYAMLObject(j) - case []interface{}: - if j == nil { - return interface{}(nil) - } - ret := make([]interface{}, len(j)) - for i := range j { - ret[i] = jsonToYAMLValue(j[i]) - } - return ret - case float64: - // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 - if i64 := int64(j); j == float64(i64) { - if i := int(i64); i64 == int64(i) { - return i - } - return i64 - } - if ui64 := uint64(j); j == float64(ui64) { - return ui64 - } - return j - case int64: - if i := int(j); j == int64(i) { - return i - } - return j - } - return j -} diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go deleted file mode 100644 index 94abc17..0000000 --- a/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ /dev/null @@ -1,31 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -//go:build go1.10 -// +build go1.10 - -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -}