From c0f2636ea6f89d0af1c977f2c1e4239c2a85932d Mon Sep 17 00:00:00 2001 From: Matteo Danelon Date: Sun, 22 Sep 2024 19:01:58 +0200 Subject: [PATCH] wip --- cmd/dnsapi/cloudflare.go | 316 +- cmd/dnsapi/cloudflare_test.go | 456 +- go.mod | 12 +- go.sum | 37 +- test/mocks/client.go | 122 +- .../cloudflare/cloudflare-go/.gitignore | 6 + .../cloudflare/cloudflare-go/.golintci.yaml | 33 + .../cloudflare/cloudflare-go/.semgrep.yml | 88 + .../cloudflare/cloudflare-go/.semgrepignore | 22 + .../cloudflare/cloudflare-go/CHANGELOG.md | 1233 +++ .../cloudflare-go/CODE_OF_CONDUCT.md | 77 + .../cloudflare/cloudflare-go/LICENSE | 26 + .../cloudflare/cloudflare-go/README.md | 85 + .../cloudflare-go/access_application.go | 511 ++ .../cloudflare-go/access_audit_log.go | 86 + .../cloudflare-go/access_bookmark.go | 206 + .../cloudflare-go/access_ca_certificate.go | 153 + .../cloudflare-go/access_custom_page.go | 127 + .../cloudflare/cloudflare-go/access_group.go | 405 + .../cloudflare-go/access_identity_provider.go | 306 + .../cloudflare/cloudflare-go/access_keys.go | 64 + .../access_mutual_tls_certificates.go | 279 + .../cloudflare-go/access_organization.go | 143 + .../cloudflare/cloudflare-go/access_policy.go | 356 + .../cloudflare/cloudflare-go/access_seats.go | 111 + .../cloudflare-go/access_service_tokens.go | 257 + .../cloudflare/cloudflare-go/access_tag.go | 85 + .../cloudflare-go/access_user_tokens.go | 24 + .../cloudflare/cloudflare-go/access_users.go | 362 + .../cloudflare-go/account_members.go | 245 + .../cloudflare/cloudflare-go/account_roles.go | 111 + .../cloudflare/cloudflare-go/accounts.go | 151 + .../cloudflare-go/addressing_address_map.go | 285 + .../cloudflare-go/addressing_ip_prefix.go | 149 + .../cloudflare/cloudflare-go/api_shield.go | 71 + .../cloudflare-go/api_shield_api_discovery.go | 190 + .../cloudflare-go/api_shield_operations.go | 185 + .../cloudflare-go/api_shield_schemas.go | 505 ++ .../cloudflare/cloudflare-go/api_token.go | 238 + .../cloudflare/cloudflare-go/argo.go | 121 + .../cloudflare/cloudflare-go/argo_tunnel.go | 162 + .../cloudflare/cloudflare-go/auditlogs.go | 158 + .../authenticated_origin_pulls.go | 67 + ...authenticated_origin_pulls_per_hostname.go | 175 + .../authenticated_origin_pulls_per_zone.go | 151 + .../cloudflare-go/bot_management.go | 93 + .../cloudflare/cloudflare-go/cache_reserve.go | 83 + .../cloudflare/cloudflare-go/catalog.json | 67 + .../cloudflare-go/certificate_packs.go | 163 + .../cloudflare-go/cloud_connector.go | 71 + .../cloudflare/cloudflare-go/cloudflare.go | 593 ++ .../cloudflare/cloudflare-go/consts.go | 27 + .../cloudflare/cloudflare-go/convert_types.go | 932 ++ .../cloudflare-go/custom_hostname.go | 330 + .../cloudflare-go/custom_nameservers.go | 207 + .../cloudflare/cloudflare-go/custom_pages.go | 177 + .../github.com/cloudflare/cloudflare-go/d1.go | 199 + .../cloudflare-go/dcv_delegation.go | 41 + .../cloudflare-go/device_posture_rule.go | 340 + .../cloudflare/cloudflare-go/devices_dex.go | 174 + .../cloudflare-go/devices_managed_networks.go | 165 + .../cloudflare-go/devices_policy.go | 381 + .../cloudflare/cloudflare-go/diagnostics.go | 102 + .../cloudflare/cloudflare-go/dlp_dataset.go | 278 + .../cloudflare-go/dlp_payload_log.go | 72 + .../cloudflare/cloudflare-go/dlp_profile.go | 234 + .../cloudflare/cloudflare-go/dns.go | 419 + .../cloudflare/cloudflare-go/dns_firewall.go | 220 + .../cloudflare/cloudflare-go/duration.go | 41 + .../email_routing_destination.go | 156 + .../cloudflare-go/email_routing_rules.go | 274 + 
.../cloudflare-go/email_routing_settings.go | 118 + .../cloudflare/cloudflare-go/errors.go | 415 + .../cloudflare-go/fallback_domain.go | 133 + .../cloudflare/cloudflare-go/filter.go | 290 + .../cloudflare/cloudflare-go/firewall.go | 281 + .../cloudflare-go/firewall_rules.go | 244 + .../cloudflare/cloudflare-go/flake.lock | 540 ++ .../cloudflare/cloudflare-go/flake.nix | 7 + .../cloudflare/cloudflare-go/flox.nix | 3 + .../cloudflare-go/gateway_categories.go | 54 + .../cloudflare/cloudflare-go/healthchecks.go | 199 + .../cloudflare/cloudflare-go/hyperdrive.go | 222 + .../cloudflare/cloudflare-go/images.go | 401 + .../cloudflare-go/images_variants.go | 163 + .../cloudflare-go/intelligence_asn.go | 101 + .../cloudflare-go/intelligence_domain.go | 177 + .../cloudflare-go/intelligence_ip.go | 156 + .../cloudflare-go/intelligence_phishing.go | 52 + .../cloudflare-go/intelligence_whois.go | 60 + .../cloudflare-go/ip_access_rules.go | 89 + .../cloudflare/cloudflare-go/ip_list.go | 507 ++ .../cloudflare/cloudflare-go/ips.go | 72 + .../cloudflare/cloudflare-go/keyless.go | 146 + .../cloudflare/cloudflare-go/list.go | 629 ++ .../cloudflare-go/load_balancing.go | 821 ++ .../cloudflare/cloudflare-go/lockdown.go | 192 + .../cloudflare/cloudflare-go/logger.go | 130 + .../cloudflare/cloudflare-go/logpull.go | 58 + .../cloudflare/cloudflare-go/logpush.go | 572 ++ .../cloudflare-go/magic_firewall_rulesets.go | 206 + .../cloudflare-go/magic_transit_gre_tunnel.go | 181 + .../magic_transit_ipsec_tunnel.go | 216 + .../magic_transit_static_routes.go | 180 + .../magic_transit_tunnel_healthcheck.go | 10 + .../cloudflare-go/managed_headers.go | 78 + .../cloudflare-go/miscategorization.go | 50 + .../cloudflare-go/mtls_certificates.go | 215 + .../cloudflare/cloudflare-go/notifications.go | 447 + .../cloudflare/cloudflare-go/observatory.go | 401 + .../cloudflare/cloudflare-go/options.go | 106 + .../cloudflare/cloudflare-go/origin_ca.go | 233 + .../cloudflare/cloudflare-go/page_rules.go | 240 + .../cloudflare/cloudflare-go/page_shield.go | 76 + .../cloudflare-go/page_shield_connections.go | 88 + .../cloudflare-go/page_shield_policies.go | 140 + .../cloudflare-go/page_shield_scripts.go | 107 + .../cloudflare-go/pages_deployment.go | 343 + .../cloudflare/cloudflare-go/pages_domain.go | 194 + .../cloudflare/cloudflare-go/pages_project.go | 333 + .../cloudflare/cloudflare-go/pagination.go | 59 + .../per_hostname_tls_settings.go | 251 + .../cloudflare-go/permission_group.go | 99 + .../cloudflare/cloudflare-go/policy.go | 8 + .../cloudflare/cloudflare-go/queue.go | 378 + .../cloudflare/cloudflare-go/r2_bucket.go | 147 + .../cloudflare/cloudflare-go/rate_limiting.go | 199 + .../cloudflare-go/regional_hostnames.go | 191 + .../cloudflare-go/regional_tiered_cache.go | 85 + .../cloudflare/cloudflare-go/registrar.go | 176 + .../cloudflare/cloudflare-go/resource.go | 114 + .../cloudflare-go/resource_group.go | 53 + .../cloudflare/cloudflare-go/rulesets.go | 915 ++ .../cloudflare-go/secondary_dns_primaries.go | 165 + .../cloudflare-go/secondary_dns_tsig.go | 132 + .../cloudflare-go/secondary_dns_zone.go | 172 + .../cloudflare/cloudflare-go/spectrum.go | 368 + .../cloudflare/cloudflare-go/split_tunnel.go | 107 + .../cloudflare/cloudflare-go/ssl.go | 173 + .../cloudflare/cloudflare-go/stream.go | 574 ++ .../cloudflare-go/teams_accounts.go | 344 + .../cloudflare-go/teams_audit_ssh_settings.go | 86 + .../cloudflare-go/teams_certificates.go | 158 + .../cloudflare/cloudflare-go/teams_devices.go | 107 + 
.../cloudflare/cloudflare-go/teams_list.go | 331 + .../cloudflare-go/teams_locations.go | 155 + .../cloudflare-go/teams_proxy_endpoints.go | 137 + .../cloudflare/cloudflare-go/teams_rules.go | 359 + .../cloudflare/cloudflare-go/tiered_cache.go | 317 + .../cloudflare/cloudflare-go/total_tls.go | 64 + .../cloudflare/cloudflare-go/tunnel.go | 536 ++ .../cloudflare/cloudflare-go/tunnel_routes.go | 214 + .../cloudflare-go/tunnel_virtual_networks.go | 159 + .../cloudflare/cloudflare-go/turnstile.go | 245 + .../cloudflare/cloudflare-go/universal_ssl.go | 108 + .../url_normalization_settings.go | 60 + .../cloudflare/cloudflare-go/user.go | 161 + .../cloudflare/cloudflare-go/user_agent.go | 151 + .../cloudflare/cloudflare-go/utils.go | 28 + .../cloudflare/cloudflare-go/waf.go | 352 + .../cloudflare/cloudflare-go/waf_overrides.go | 138 + .../cloudflare/cloudflare-go/waiting_room.go | 630 ++ .../cloudflare/cloudflare-go/web3.go | 198 + .../cloudflare/cloudflare-go/web_analytics.go | 411 + .../cloudflare/cloudflare-go/workers.go | 631 ++ .../cloudflare-go/workers_account_settings.go | 83 + .../cloudflare-go/workers_bindings.go | 651 ++ .../cloudflare-go/workers_cron_triggers.go | 91 + .../cloudflare-go/workers_domain.go | 151 + .../cloudflare-go/workers_for_platforms.go | 139 + .../cloudflare/cloudflare-go/workers_kv.go | 378 + .../cloudflare-go/workers_routes.go | 162 + .../cloudflare-go/workers_secrets.go | 125 + .../cloudflare-go/workers_subdomain.go | 58 + .../cloudflare/cloudflare-go/workers_tail.go | 114 + .../cloudflare/cloudflare-go/zaraz.go | 430 + .../cloudflare/cloudflare-go/zone.go | 1077 +++ .../cloudflare-go/zone_cache_variants.go | 89 + .../cloudflare/cloudflare-go/zone_hold.go | 111 + .../cloudflare-go/zt_risk_behaviors.go | 126 + .../zt_risk_score_integration.go | 180 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/davecgh/go-spew/spew/bypass.go | 145 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 ++ .../github.com/davecgh/go-spew/spew/format.go | 419 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + vendor/github.com/goccy/go-json/.codecov.yml | 32 + vendor/github.com/goccy/go-json/.gitignore | 2 + vendor/github.com/goccy/go-json/.golangci.yml | 86 + vendor/github.com/goccy/go-json/CHANGELOG.md | 425 + vendor/github.com/goccy/go-json/LICENSE | 21 + vendor/github.com/goccy/go-json/Makefile | 39 + vendor/github.com/goccy/go-json/README.md | 529 ++ vendor/github.com/goccy/go-json/color.go | 68 + vendor/github.com/goccy/go-json/decode.go | 263 + .../goccy/go-json/docker-compose.yml | 13 + vendor/github.com/goccy/go-json/encode.go | 326 + vendor/github.com/goccy/go-json/error.go | 41 + .../internal/decoder/anonymous_field.go | 41 + .../goccy/go-json/internal/decoder/array.go | 176 + .../goccy/go-json/internal/decoder/assign.go | 438 + .../goccy/go-json/internal/decoder/bool.go | 83 + .../goccy/go-json/internal/decoder/bytes.go | 118 + .../goccy/go-json/internal/decoder/compile.go | 487 + .../internal/decoder/compile_norace.go | 29 + .../go-json/internal/decoder/compile_race.go | 37 + .../goccy/go-json/internal/decoder/context.go | 254 + .../goccy/go-json/internal/decoder/float.go | 170 + .../goccy/go-json/internal/decoder/func.go | 146 + .../goccy/go-json/internal/decoder/int.go | 246 + .../go-json/internal/decoder/interface.go | 528 ++ 
.../goccy/go-json/internal/decoder/invalid.go | 55 + .../goccy/go-json/internal/decoder/map.go | 280 + .../goccy/go-json/internal/decoder/number.go | 123 + .../goccy/go-json/internal/decoder/option.go | 17 + .../goccy/go-json/internal/decoder/path.go | 670 ++ .../goccy/go-json/internal/decoder/ptr.go | 97 + .../goccy/go-json/internal/decoder/slice.go | 380 + .../goccy/go-json/internal/decoder/stream.go | 556 ++ .../goccy/go-json/internal/decoder/string.go | 452 + .../goccy/go-json/internal/decoder/struct.go | 845 ++ .../goccy/go-json/internal/decoder/type.go | 30 + .../goccy/go-json/internal/decoder/uint.go | 194 + .../internal/decoder/unmarshal_json.go | 104 + .../internal/decoder/unmarshal_text.go | 285 + .../internal/decoder/wrapped_string.go | 73 + .../goccy/go-json/internal/encoder/code.go | 1023 +++ .../goccy/go-json/internal/encoder/compact.go | 286 + .../go-json/internal/encoder/compiler.go | 935 ++ .../internal/encoder/compiler_norace.go | 32 + .../go-json/internal/encoder/compiler_race.go | 45 + .../goccy/go-json/internal/encoder/context.go | 105 + .../go-json/internal/encoder/decode_rune.go | 126 + .../goccy/go-json/internal/encoder/encoder.go | 596 ++ .../goccy/go-json/internal/encoder/indent.go | 211 + .../goccy/go-json/internal/encoder/int.go | 176 + .../goccy/go-json/internal/encoder/map112.go | 9 + .../goccy/go-json/internal/encoder/map113.go | 9 + .../goccy/go-json/internal/encoder/opcode.go | 752 ++ .../goccy/go-json/internal/encoder/option.go | 48 + .../goccy/go-json/internal/encoder/optype.go | 932 ++ .../goccy/go-json/internal/encoder/query.go | 135 + .../goccy/go-json/internal/encoder/string.go | 483 + .../go-json/internal/encoder/string_table.go | 415 + .../go-json/internal/encoder/vm/debug_vm.go | 41 + .../goccy/go-json/internal/encoder/vm/hack.go | 9 + .../goccy/go-json/internal/encoder/vm/util.go | 207 + .../goccy/go-json/internal/encoder/vm/vm.go | 4859 ++++++++++ .../internal/encoder/vm_color/debug_vm.go | 35 + .../go-json/internal/encoder/vm_color/hack.go | 9 + .../go-json/internal/encoder/vm_color/util.go | 274 + .../go-json/internal/encoder/vm_color/vm.go | 4859 ++++++++++ .../encoder/vm_color_indent/debug_vm.go | 35 + .../internal/encoder/vm_color_indent/util.go | 297 + .../internal/encoder/vm_color_indent/vm.go | 4859 ++++++++++ .../internal/encoder/vm_indent/debug_vm.go | 35 + .../internal/encoder/vm_indent/hack.go | 9 + .../internal/encoder/vm_indent/util.go | 230 + .../go-json/internal/encoder/vm_indent/vm.go | 4859 ++++++++++ .../goccy/go-json/internal/errors/error.go | 183 + .../goccy/go-json/internal/runtime/rtype.go | 262 + .../go-json/internal/runtime/struct_field.go | 91 + .../goccy/go-json/internal/runtime/type.go | 100 + vendor/github.com/goccy/go-json/json.go | 368 + vendor/github.com/goccy/go-json/option.go | 79 + vendor/github.com/goccy/go-json/path.go | 84 + vendor/github.com/goccy/go-json/query.go | 47 + .../github.com/google/go-querystring/LICENSE | 27 + .../google/go-querystring/query/encode.go | 357 + vendor/github.com/pmezard/go-difflib/LICENSE | 27 + .../pmezard/go-difflib/difflib/difflib.go | 772 ++ vendor/github.com/stretchr/testify/LICENSE | 21 + .../testify/assert/assertion_compare.go | 480 + .../testify/assert/assertion_format.go | 815 ++ .../testify/assert/assertion_format.go.tmpl | 5 + .../testify/assert/assertion_forward.go | 1621 ++++ .../testify/assert/assertion_forward.go.tmpl | 5 + .../testify/assert/assertion_order.go | 81 + .../stretchr/testify/assert/assertions.go | 2105 +++++ .../github.com/stretchr/testify/assert/doc.go 
| 46 + .../stretchr/testify/assert/errors.go | 10 + .../testify/assert/forward_assertions.go | 16 + .../testify/assert/http_assertions.go | 165 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/idna/go118.go | 13 + vendor/golang.org/x/net/idna/idna10.0.0.go | 769 ++ vendor/golang.org/x/net/idna/idna9.0.0.go | 717 ++ vendor/golang.org/x/net/idna/pre_go118.go | 11 + vendor/golang.org/x/net/idna/punycode.go | 217 + vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 ++++++++++ vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ++++++++++ vendor/golang.org/x/net/idna/tables12.0.0.go | 4733 ++++++++++ vendor/golang.org/x/net/idna/tables13.0.0.go | 4959 +++++++++++ vendor/golang.org/x/net/idna/tables15.0.0.go | 5144 +++++++++++ vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 ++++++++++ vendor/golang.org/x/net/idna/trie.go | 51 + vendor/golang.org/x/net/idna/trie12.0.0.go | 30 + vendor/golang.org/x/net/idna/trie13.0.0.go | 30 + vendor/golang.org/x/net/idna/trieval.go | 119 + vendor/golang.org/x/sys/LICENSE | 4 +- .../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/unix/aliases.go | 4 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 - vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 - .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 - .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 - .../x/sys/unix/asm_openbsd_mips64.s | 1 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 668 +- vendor/golang.org/x/sys/unix/bpxsvc_zos.go | 657 ++ vendor/golang.org/x/sys/unix/bpxsvc_zos.s | 192 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 - vendor/golang.org/x/sys/unix/constants.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/dev_zos.go | 1 - vendor/golang.org/x/sys/unix/dirent.go | 1 - vendor/golang.org/x/sys/unix/endian_big.go | 1 - vendor/golang.org/x/sys/unix/endian_little.go | 1 - vendor/golang.org/x/sys/unix/env_unix.go | 1 - vendor/golang.org/x/sys/unix/epoll_zos.go | 221 - vendor/golang.org/x/sys/unix/fcntl.go | 3 +- .../x/sys/unix/fcntl_linux_32bit.go | 1 - vendor/golang.org/x/sys/unix/fdset.go | 1 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 - vendor/golang.org/x/sys/unix/gccgo.go | 1 - vendor/golang.org/x/sys/unix/gccgo_c.c | 1 - .../x/sys/unix/gccgo_linux_amd64.go | 1 - vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 - vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 69 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 3 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 13 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 61 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 13 + vendor/golang.org/x/sys/unix/mremap.go | 57 + 
vendor/golang.org/x/sys/unix/pagesize_unix.go | 3 +- .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 7 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 7 - vendor/golang.org/x/sys/unix/race.go | 1 - vendor/golang.org/x/sys/unix/race0.go | 1 - .../x/sys/unix/readdirent_getdents.go | 1 - .../x/sys/unix/readdirent_getdirentries.go | 3 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 - .../x/sys/unix/sockcmsg_unix_other.go | 1 - vendor/golang.org/x/sys/unix/sockcmsg_zos.go | 58 + .../golang.org/x/sys/unix/symaddr_zos_s390x.s | 75 + vendor/golang.org/x/sys/unix/syscall.go | 1 - vendor/golang.org/x/sys/unix/syscall_aix.go | 25 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 2 - .../x/sys/unix/syscall_aix_ppc64.go | 2 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 17 +- .../golang.org/x/sys/unix/syscall_darwin.go | 292 +- .../x/sys/unix/syscall_darwin_amd64.go | 2 - .../x/sys/unix/syscall_darwin_arm64.go | 2 - .../x/sys/unix/syscall_darwin_libSystem.go | 3 +- .../x/sys/unix/syscall_dragonfly.go | 199 - .../x/sys/unix/syscall_dragonfly_amd64.go | 1 - .../golang.org/x/sys/unix/syscall_freebsd.go | 205 +- .../x/sys/unix/syscall_freebsd_386.go | 1 - .../x/sys/unix/syscall_freebsd_amd64.go | 1 - .../x/sys/unix/syscall_freebsd_arm.go | 1 - .../x/sys/unix/syscall_freebsd_arm64.go | 1 - .../x/sys/unix/syscall_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/syscall_hurd.go | 2 +- .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 - .../golang.org/x/sys/unix/syscall_illumos.go | 1 - vendor/golang.org/x/sys/unix/syscall_linux.go | 367 +- .../x/sys/unix/syscall_linux_386.go | 28 - .../x/sys/unix/syscall_linux_alarm.go | 2 - .../x/sys/unix/syscall_linux_amd64.go | 4 +- .../x/sys/unix/syscall_linux_amd64_gc.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 28 - .../x/sys/unix/syscall_linux_arm64.go | 13 +- .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 - .../x/sys/unix/syscall_linux_gc_386.go | 1 - .../x/sys/unix/syscall_linux_gc_arm.go | 1 - .../x/sys/unix/syscall_linux_gccgo_386.go | 1 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 - .../x/sys/unix/syscall_linux_loong64.go | 8 +- .../x/sys/unix/syscall_linux_mips64x.go | 5 +- .../x/sys/unix/syscall_linux_mipsx.go | 29 - .../x/sys/unix/syscall_linux_ppc.go | 28 - .../x/sys/unix/syscall_linux_ppc64x.go | 3 - .../x/sys/unix/syscall_linux_riscv64.go | 15 +- .../x/sys/unix/syscall_linux_s390x.go | 2 - .../x/sys/unix/syscall_linux_sparc64.go | 2 - .../golang.org/x/sys/unix/syscall_netbsd.go | 274 +- .../x/sys/unix/syscall_netbsd_386.go | 1 - .../x/sys/unix/syscall_netbsd_amd64.go | 1 - .../x/sys/unix/syscall_netbsd_arm.go | 1 - .../x/sys/unix/syscall_netbsd_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_openbsd.go | 121 +- .../x/sys/unix/syscall_openbsd_386.go | 1 - .../x/sys/unix/syscall_openbsd_amd64.go | 1 - .../x/sys/unix/syscall_openbsd_arm.go | 1 - .../x/sys/unix/syscall_openbsd_arm64.go | 1 - .../x/sys/unix/syscall_openbsd_libc.go | 1 - .../x/sys/unix/syscall_openbsd_ppc64.go | 1 - .../x/sys/unix/syscall_openbsd_riscv64.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 58 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 - vendor/golang.org/x/sys/unix/syscall_unix.go | 28 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 - .../x/sys/unix/syscall_zos_s390x.go | 1515 +++- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 - vendor/golang.org/x/sys/unix/sysvshm_unix.go | 3 +- .../x/sys/unix/sysvshm_unix_other.go | 3 +- 
vendor/golang.org/x/sys/unix/timestruct.go | 1 - .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 - .../x/sys/unix/zerrors_aix_ppc64.go | 1 - .../x/sys/unix/zerrors_darwin_amd64.go | 32 +- .../x/sys/unix/zerrors_darwin_arm64.go | 32 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_386.go | 1 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_arm.go | 1 - .../x/sys/unix/zerrors_freebsd_arm64.go | 1 - .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 235 +- .../x/sys/unix/zerrors_linux_386.go | 18 +- .../x/sys/unix/zerrors_linux_amd64.go | 18 +- .../x/sys/unix/zerrors_linux_arm.go | 17 +- .../x/sys/unix/zerrors_linux_arm64.go | 20 +- .../x/sys/unix/zerrors_linux_loong64.go | 20 +- .../x/sys/unix/zerrors_linux_mips.go | 17 +- .../x/sys/unix/zerrors_linux_mips64.go | 17 +- .../x/sys/unix/zerrors_linux_mips64le.go | 17 +- .../x/sys/unix/zerrors_linux_mipsle.go | 17 +- .../x/sys/unix/zerrors_linux_ppc.go | 17 +- .../x/sys/unix/zerrors_linux_ppc64.go | 17 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 17 +- .../x/sys/unix/zerrors_linux_riscv64.go | 20 +- .../x/sys/unix/zerrors_linux_s390x.go | 17 +- .../x/sys/unix/zerrors_linux_sparc64.go | 65 +- .../x/sys/unix/zerrors_netbsd_386.go | 1 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1 - .../x/sys/unix/zerrors_netbsd_arm.go | 1 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_386.go | 1 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1 - .../x/sys/unix/zerrors_openbsd_arm.go | 1 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_mips64.go | 1 - .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 - .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 - .../x/sys/unix/zerrors_solaris_amd64.go | 1 - .../x/sys/unix/zerrors_zos_s390x.go | 236 +- .../x/sys/unix/zptrace_armnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 - .../x/sys/unix/zptrace_x86_linux.go | 2 - .../x/sys/unix/zsymaddr_zos_s390x.s | 364 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 38 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 41 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 11 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 11 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 177 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 185 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 177 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 185 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 33 - .../x/sys/unix/zsyscall_freebsd_386.go | 33 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 33 - .../x/sys/unix/zsyscall_freebsd_arm.go | 33 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 33 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 33 - .../x/sys/unix/zsyscall_illumos_amd64.go | 11 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 131 +- .../x/sys/unix/zsyscall_linux_386.go | 11 - .../x/sys/unix/zsyscall_linux_amd64.go | 11 - .../x/sys/unix/zsyscall_linux_arm.go | 11 - .../x/sys/unix/zsyscall_linux_arm64.go | 11 - .../x/sys/unix/zsyscall_linux_loong64.go | 1 - .../x/sys/unix/zsyscall_linux_mips.go | 11 - .../x/sys/unix/zsyscall_linux_mips64.go | 11 - .../x/sys/unix/zsyscall_linux_mips64le.go | 11 - .../x/sys/unix/zsyscall_linux_mipsle.go | 11 - .../x/sys/unix/zsyscall_linux_ppc.go | 11 - .../x/sys/unix/zsyscall_linux_ppc64.go | 11 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 11 - .../x/sys/unix/zsyscall_linux_riscv64.go | 27 
+- .../x/sys/unix/zsyscall_linux_s390x.go | 11 - .../x/sys/unix/zsyscall_linux_sparc64.go | 11 - .../x/sys/unix/zsyscall_netbsd_386.go | 40 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 40 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 40 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 40 +- .../x/sys/unix/zsyscall_openbsd_386.go | 140 +- .../x/sys/unix/zsyscall_openbsd_386.s | 40 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 140 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 40 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 140 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 40 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 140 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 40 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 140 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 40 +- .../x/sys/unix/zsyscall_openbsd_ppc64.go | 140 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 48 +- .../x/sys/unix/zsyscall_openbsd_riscv64.go | 140 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 40 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 274 +- .../x/sys/unix/zsyscall_zos_s390x.go | 3115 ++++++- .../x/sys/unix/zsysctl_openbsd_386.go | 1 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 - .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 - .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_darwin_amd64.go | 1 - .../x/sys/unix/zsysnum_darwin_arm64.go | 1 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_386.go | 1 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 - .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_linux_386.go | 13 +- .../x/sys/unix/zsysnum_linux_amd64.go | 13 +- .../x/sys/unix/zsysnum_linux_arm.go | 13 +- .../x/sys/unix/zsysnum_linux_arm64.go | 13 +- .../x/sys/unix/zsysnum_linux_loong64.go | 13 +- .../x/sys/unix/zsysnum_linux_mips.go | 13 +- .../x/sys/unix/zsysnum_linux_mips64.go | 13 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 13 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 13 +- .../x/sys/unix/zsysnum_linux_ppc.go | 13 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 13 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 13 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 15 +- .../x/sys/unix/zsysnum_linux_s390x.go | 14 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 13 +- .../x/sys/unix/zsysnum_netbsd_386.go | 1 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_386.go | 1 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 - .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_zos_s390x.go | 5508 ++++++------ .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 - .../x/sys/unix/ztypes_darwin_amd64.go | 25 +- .../x/sys/unix/ztypes_darwin_arm64.go | 25 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_386.go | 2 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 2 +- .../x/sys/unix/ztypes_freebsd_arm.go | 2 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 2 +- .../x/sys/unix/ztypes_freebsd_riscv64.go | 2 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 394 +- 
.../golang.org/x/sys/unix/ztypes_linux_386.go | 11 +- .../x/sys/unix/ztypes_linux_amd64.go | 12 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 12 +- .../x/sys/unix/ztypes_linux_arm64.go | 12 +- .../x/sys/unix/ztypes_linux_loong64.go | 12 +- .../x/sys/unix/ztypes_linux_mips.go | 12 +- .../x/sys/unix/ztypes_linux_mips64.go | 12 +- .../x/sys/unix/ztypes_linux_mips64le.go | 12 +- .../x/sys/unix/ztypes_linux_mipsle.go | 12 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 12 +- .../x/sys/unix/ztypes_linux_ppc64.go | 12 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 12 +- .../x/sys/unix/ztypes_linux_riscv64.go | 72 +- .../x/sys/unix/ztypes_linux_s390x.go | 12 +- .../x/sys/unix/ztypes_linux_sparc64.go | 12 +- .../x/sys/unix/ztypes_netbsd_386.go | 1 - .../x/sys/unix/ztypes_netbsd_amd64.go | 1 - .../x/sys/unix/ztypes_netbsd_arm.go | 1 - .../x/sys/unix/ztypes_netbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_386.go | 1 - .../x/sys/unix/ztypes_openbsd_amd64.go | 1 - .../x/sys/unix/ztypes_openbsd_arm.go | 1 - .../x/sys/unix/ztypes_openbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_mips64.go | 1 - .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 - .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 - .../x/sys/unix/ztypes_solaris_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 147 +- vendor/golang.org/x/sys/windows/aliases.go | 3 +- vendor/golang.org/x/sys/windows/empty.s | 9 - .../golang.org/x/sys/windows/env_windows.go | 17 +- vendor/golang.org/x/sys/windows/eventlog.go | 1 - .../golang.org/x/sys/windows/exec_windows.go | 92 +- vendor/golang.org/x/sys/windows/mksyscall.go | 1 - vendor/golang.org/x/sys/windows/race.go | 1 - vendor/golang.org/x/sys/windows/race0.go | 1 - .../x/sys/windows/security_windows.go | 46 +- vendor/golang.org/x/sys/windows/service.go | 12 +- vendor/golang.org/x/sys/windows/str.go | 1 - vendor/golang.org/x/sys/windows/syscall.go | 1 - .../x/sys/windows/syscall_windows.go | 178 +- .../golang.org/x/sys/windows/types_windows.go | 141 +- .../x/sys/windows/zsyscall_windows.go | 305 +- vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + .../x/text/secure/bidirule/bidirule.go | 336 + .../x/text/secure/bidirule/bidirule10.0.0.go | 11 + .../x/text/secure/bidirule/bidirule9.0.0.go | 14 + .../golang.org/x/text/transform/transform.go | 709 ++ vendor/golang.org/x/text/unicode/bidi/bidi.go | 359 + .../golang.org/x/text/unicode/bidi/bracket.go | 335 + vendor/golang.org/x/text/unicode/bidi/core.go | 1071 +++ vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../x/text/unicode/bidi/tables10.0.0.go | 1815 ++++ .../x/text/unicode/bidi/tables11.0.0.go | 1887 ++++ .../x/text/unicode/bidi/tables12.0.0.go | 1923 ++++ .../x/text/unicode/bidi/tables13.0.0.go | 1955 ++++ .../x/text/unicode/bidi/tables15.0.0.go | 2042 +++++ .../x/text/unicode/bidi/tables9.0.0.go | 1781 ++++ .../golang.org/x/text/unicode/bidi/trieval.go | 48 + .../x/text/unicode/norm/composition.go | 512 ++ .../x/text/unicode/norm/forminfo.go | 279 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 458 + .../x/text/unicode/norm/normalize.go | 610 ++ .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7657 ++++++++++++++++ .../x/text/unicode/norm/tables11.0.0.go | 7693 ++++++++++++++++ .../x/text/unicode/norm/tables12.0.0.go | 7710 ++++++++++++++++ .../x/text/unicode/norm/tables13.0.0.go | 7760 ++++++++++++++++ .../x/text/unicode/norm/tables15.0.0.go | 7907 +++++++++++++++++ .../x/text/unicode/norm/tables9.0.0.go | 
7637 ++++++++++++++++ .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + vendor/golang.org/x/time/LICENSE | 27 + vendor/golang.org/x/time/PATENTS | 22 + vendor/golang.org/x/time/rate/rate.go | 430 + vendor/golang.org/x/time/rate/sometimes.go | 67 + vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 ++ vendor/gopkg.in/yaml.v3/decode.go | 1000 +++ vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +++++ vendor/gopkg.in/yaml.v3/encode.go | 577 ++ vendor/gopkg.in/yaml.v3/parserc.go | 1258 +++ vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++++++ vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 698 ++ vendor/gopkg.in/yaml.v3/yamlh.go | 807 ++ vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/modules.txt | 46 +- 675 files changed, 203714 insertions(+), 8149 deletions(-) create mode 100644 vendor/github.com/cloudflare/cloudflare-go/.gitignore create mode 100644 vendor/github.com/cloudflare/cloudflare-go/.golintci.yaml create mode 100644 vendor/github.com/cloudflare/cloudflare-go/.semgrep.yml create mode 100644 vendor/github.com/cloudflare/cloudflare-go/.semgrepignore create mode 100644 vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md create mode 100644 vendor/github.com/cloudflare/cloudflare-go/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/cloudflare/cloudflare-go/LICENSE create mode 100644 vendor/github.com/cloudflare/cloudflare-go/README.md create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_application.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_audit_log.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_bookmark.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_ca_certificate.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_custom_page.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_group.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_identity_provider.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_keys.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_mutual_tls_certificates.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_organization.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_policy.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_seats.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_service_tokens.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_tag.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_user_tokens.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/access_users.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/account_members.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/account_roles.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/accounts.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/addressing_address_map.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/addressing_ip_prefix.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/api_shield.go create mode 100644 
vendor/github.com/cloudflare/cloudflare-go/api_shield_api_discovery.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/api_shield_operations.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/api_shield_schemas.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/api_token.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/argo.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/argo_tunnel.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/auditlogs.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_hostname.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_zone.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/bot_management.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/cache_reserve.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/catalog.json create mode 100644 vendor/github.com/cloudflare/cloudflare-go/certificate_packs.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/cloud_connector.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/cloudflare.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/consts.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/convert_types.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/custom_hostname.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/custom_nameservers.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/custom_pages.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/d1.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/dcv_delegation.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/device_posture_rule.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/devices_dex.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/devices_managed_networks.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/devices_policy.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/diagnostics.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/dlp_payload_log.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/dlp_profile.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/dns.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/dns_firewall.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/duration.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/email_routing_destination.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/email_routing_rules.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/email_routing_settings.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/errors.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/fallback_domain.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/filter.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/firewall.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/firewall_rules.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/flake.lock create mode 100644 vendor/github.com/cloudflare/cloudflare-go/flake.nix create mode 100644 
vendor/github.com/cloudflare/cloudflare-go/flox.nix create mode 100644 vendor/github.com/cloudflare/cloudflare-go/gateway_categories.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/healthchecks.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/hyperdrive.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/images.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/images_variants.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/intelligence_asn.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/intelligence_domain.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/intelligence_ip.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/intelligence_phishing.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/intelligence_whois.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/ip_access_rules.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/ip_list.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/ips.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/keyless.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/list.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/load_balancing.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/lockdown.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/logger.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/logpull.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/logpush.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/magic_firewall_rulesets.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/magic_transit_gre_tunnel.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/magic_transit_ipsec_tunnel.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/magic_transit_static_routes.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/magic_transit_tunnel_healthcheck.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/managed_headers.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/miscategorization.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/mtls_certificates.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/notifications.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/observatory.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/options.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/origin_ca.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/page_rules.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/page_shield.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/page_shield_connections.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/page_shield_policies.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/page_shield_scripts.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/pages_deployment.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/pages_domain.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/pages_project.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/pagination.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/per_hostname_tls_settings.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/permission_group.go create mode 100644 
vendor/github.com/cloudflare/cloudflare-go/policy.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/queue.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/r2_bucket.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/rate_limiting.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/regional_hostnames.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/regional_tiered_cache.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/registrar.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/resource.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/resource_group.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/rulesets.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/secondary_dns_primaries.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/secondary_dns_tsig.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/secondary_dns_zone.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/spectrum.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/split_tunnel.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/ssl.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/stream.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_audit_ssh_settings.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_certificates.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_devices.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_list.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_locations.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_proxy_endpoints.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/teams_rules.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/tiered_cache.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/total_tls.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/tunnel.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/tunnel_routes.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/tunnel_virtual_networks.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/turnstile.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/universal_ssl.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/url_normalization_settings.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/user.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/user_agent.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/utils.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/waf.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/waf_overrides.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/waiting_room.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/web3.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/web_analytics.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_account_settings.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_bindings.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_cron_triggers.go create mode 100644 
vendor/github.com/cloudflare/cloudflare-go/workers_domain.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_for_platforms.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_kv.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_routes.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_secrets.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_subdomain.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/workers_tail.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/zaraz.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/zone.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/zone_cache_variants.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/zone_hold.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/zt_risk_behaviors.go create mode 100644 vendor/github.com/cloudflare/cloudflare-go/zt_risk_score_integration.go create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/goccy/go-json/.codecov.yml create mode 100644 vendor/github.com/goccy/go-json/.gitignore create mode 100644 vendor/github.com/goccy/go-json/.golangci.yml create mode 100644 vendor/github.com/goccy/go-json/CHANGELOG.md create mode 100644 vendor/github.com/goccy/go-json/LICENSE create mode 100644 vendor/github.com/goccy/go-json/Makefile create mode 100644 vendor/github.com/goccy/go-json/README.md create mode 100644 vendor/github.com/goccy/go-json/color.go create mode 100644 vendor/github.com/goccy/go-json/decode.go create mode 100644 vendor/github.com/goccy/go-json/docker-compose.yml create mode 100644 vendor/github.com/goccy/go-json/encode.go create mode 100644 vendor/github.com/goccy/go-json/error.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/array.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/assign.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/bool.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/bytes.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile_race.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/context.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/float.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/func.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/int.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/interface.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/invalid.go create mode 100644 
vendor/github.com/goccy/go-json/internal/decoder/map.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/number.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/option.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/path.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/ptr.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/slice.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/stream.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/string.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/struct.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/type.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/uint.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/code.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compact.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/context.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/encoder.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/indent.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/int.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/map112.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/map113.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/opcode.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/option.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/optype.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/query.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/string.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/string_table.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go create mode 
100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/errors/error.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/rtype.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/struct_field.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/type.go create mode 100644 vendor/github.com/goccy/go-json/json.go create mode 100644 vendor/github.com/goccy/go-json/option.go create mode 100644 vendor/github.com/goccy/go-json/path.go create mode 100644 vendor/github.com/goccy/go-json/query.go create mode 100644 vendor/github.com/google/go-querystring/LICENSE create mode 100644 vendor/github.com/google/go-querystring/query/encode.go create mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go create mode 100644 vendor/github.com/stretchr/testify/LICENSE create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_order.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/doc.go create mode 100644 vendor/github.com/stretchr/testify/assert/errors.go create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/idna/go118.go create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/pre_go118.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables15.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trie12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie13.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.s delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (92%) create mode 100644 
vendor/golang.org/x/sys/unix/mmap_nomremap.go create mode 100644 vendor/golang.org/x/sys/unix/mremap.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_zos.go create mode 100644 vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables15.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/rate/rate.go create mode 100644 vendor/golang.org/x/time/rate/sometimes.go create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 
vendor/gopkg.in/yaml.v3/yamlprivateh.go diff --git a/cmd/dnsapi/cloudflare.go b/cmd/dnsapi/cloudflare.go index 7f96f31..6587187 100644 --- a/cmd/dnsapi/cloudflare.go +++ b/cmd/dnsapi/cloudflare.go @@ -1,51 +1,20 @@ package dnsapi import ( - "encoding/json" + "context" "errors" - "fmt" - "io" - "net/http" - "strings" + "github.com/cloudflare/cloudflare-go" "github.com/daruzero/cloudflare-dns-auto-updater-go/internal/config" "github.com/daruzero/cloudflare-dns-auto-updater-go/pkg/utils" "go.uber.org/zap" ) -type HTTPClient interface { - Do(req *http.Request) (*http.Response, error) -} - type CFDNS struct { - HTTPClient HTTPClient - Cfg *config.Config - Records map[string][]Record - Zones []Zone -} - -type Record struct { - Content string `json:"content"` - ID string `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - ZoneID string `json:"zone_id"` - ZoneName string `json:"zone_name"` -} - -type Zone struct { - ID string `json:"id"` - Name string `json:"name"` -} - -type Error struct { - Message string `json:"message"` - Code int `json:"code"` -} - -type Message struct { - Message string `json:"message"` - Code int `json:"code"` + Api *cloudflare.API + Cfg *config.Config + Records map[string][]cloudflare.DNSRecord + Zones []cloudflare.Zone } // New creates a new Dns struct instance @@ -55,205 +24,58 @@ func New(cfg *config.Config) (dns *CFDNS, err error) { Cfg: cfg, } - dns.HTTPClient = http.DefaultClient - - if len(cfg.ZoneIDs) > 0 { - err = dns.checkZoneIDs() - if err != nil { - return dns, err - } - } else { - err = dns.getZoneIDs() - if err != nil { - return dns, err - } - } - - dns.Records = make(map[string][]Record) - err = dns.getRecords() + // Authenticate the APIs + dns.Api, err = cloudflare.New(dns.Cfg.AuthKey, dns.Cfg.Email) if err != nil { - return dns, err - } - - return dns, nil -} - -// CheckZoneIDs checks if the zone ids are valid -func (dns *CFDNS) checkZoneIDs() (err error) { - zap.S().Info("Getting zones info") - reqURL := "https://api.cloudflare.com/client/v4/zones/" - - req, err := createCFRequest(http.MethodGet, reqURL, dns.Cfg.Email, dns.Cfg.AuthKey, nil) - if err != nil { - return err - } - - res, err := dns.HTTPClient.Do(req) - if err != nil { - return err - } - - type ResponseBody struct { - Errors []Error `json:"errors"` - Messages []Message `json:"messages"` - Result []Zone `json:"result"` - Success bool `json:"success"` + return nil, err } - var resBody ResponseBody - err = unmarshalResponse(res.Body, &resBody) + // Fetch all zones + allZones, err := dns.Api.ListZones(context.Background()) if err != nil { - return err - } - res.Body.Close() - - if !resBody.Success || res.StatusCode != http.StatusOK { - zap.S().Errorf("Error checking zone id, skipping. HTTP status code: %d. 
Response body: %v", res.StatusCode, resBody) + return nil, err } - for _, zoneID := range dns.Cfg.ZoneIDs { - zap.S().Infof("Checking zone id %s", zoneID) - isValid := false - - for _, zone := range resBody.Result { - if zone.ID == zoneID { - dns.Zones = append(dns.Zones, zone) - isValid = true - break - } - } - - if !isValid { - zap.S().Warnf("Zone id %s is not valid, skipping.", zoneID) + // Filter only the selected zones + for _, zone := range allZones { + if utils.StringInSlice(zone.ID, dns.Cfg.ZoneIDs) || utils.StringInSlice(zone.Name, dns.Cfg.ZoneNames) { + dns.Zones = append(dns.Zones, zone) } } if len(dns.Zones) == 0 { - return errors.New("no valid zone ids found") + return nil, errors.New("no zones found") } - return nil -} - -// GetZoneIDs gets the zone id from the zone name -func (dns *CFDNS) getZoneIDs() (err error) { - for _, zoneName := range dns.Cfg.ZoneNames { - zap.S().Infof("Getting zone id for %s", zoneName) - reqURL := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones?name=%s", zoneName) - - req, err := createCFRequest(http.MethodGet, reqURL, dns.Cfg.Email, dns.Cfg.AuthKey, nil) - if err != nil { - return err - } - - res, err := dns.HTTPClient.Do(req) - if err != nil { - return err - } - - type ResponseBody struct { - Errors []Error `json:"errors"` - Messages []Message `json:"messages"` - Result []Zone `json:"result"` - Success bool `json:"success"` - } - - var resBody ResponseBody - err = unmarshalResponse(res.Body, &resBody) - if err != nil { - return err - } - res.Body.Close() - - if !resBody.Success || res.StatusCode != http.StatusOK { - zap.S().Errorf("Error getting zone id, skipping. HTTP status code: %d. Response body: %v", res.StatusCode, resBody) - } - - if len(resBody.Result) == 0 { - zap.S().Errorf("No zone found with name %s, skipping.", zoneName) - } - - for _, zone := range resBody.Result { - if strings.EqualFold(zone.Name, zoneName) { - dns.Zones = append(dns.Zones, zone) - break - } - } - } - - if len(dns.Zones) == 0 { - return errors.New("no zone ids found") + // Get the zones records + dns.Records = make(map[string][]cloudflare.DNSRecord) + err = dns.getRecords() + if err != nil { + return dns, err } - return nil + return dns, nil } // getRecords gets all the records for the zone func (dns *CFDNS) getRecords() (err error) { zap.S().Info("Getting records") for _, zone := range dns.Zones { - reqURL := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records", zone.ID) - - req, err := createCFRequest(http.MethodGet, reqURL, dns.Cfg.Email, dns.Cfg.AuthKey, nil) - if err != nil { - return err - } - - res, err := dns.HTTPClient.Do(req) + zap.S().Debugf("%+v", zone) + records, _, err := dns.Api.ListDNSRecords(context.Background(), cloudflare.ZoneIdentifier(zone.ID), cloudflare.ListDNSRecordsParams{Type: "A"}) if err != nil { return err } - type ResponseBody struct { - Errors []Error `json:"errors"` - Messages []Message `json:"messages"` - Result []Record `json:"result"` - Success bool `json:"success"` - } - - var resBody ResponseBody - err = unmarshalResponse(res.Body, &resBody) - if err != nil { - return err - } - res.Body.Close() - - if !resBody.Success || res.StatusCode != http.StatusOK { - strErr := fmt.Sprintf("Error getting records for zone %s. HTTP status code: %d. 
Response body: %v", zone.Name, res.StatusCode, resBody) - return errors.New(strErr) - } - - if len(resBody.Result) == 0 { - return errors.New("no records found") - } - - recordsMap := make(map[string]Record) - for _, record := range resBody.Result { - if record.Type == "A" && (len(dns.Cfg.RecordIDs) == 0 || utils.StringInSlice(record.ID, dns.Cfg.RecordIDs)) { - recordsMap[record.Name] = record - } - } - - if len(recordsMap) == 0 { - zap.S().Errorf("No records found for zone %s", zone.Name) - continue - } - - if _, ok := dns.Records[zone.Name]; !ok { - dns.Records[zone.Name] = make([]Record, 0) - } - - for i, record := range dns.Records[zone.Name] { - if updatedRecord, ok := recordsMap[record.Name]; ok { - dns.Records[zone.Name][i] = updatedRecord - delete(recordsMap, record.Name) + if len(dns.Cfg.RecordIDs) != 0 { + for _, record := range records { + if utils.StringInSlice(record.ID, dns.Cfg.RecordIDs) { + dns.Records[zone.ID] = append(dns.Records[zone.ID], record) + } } + } else { + dns.Records[zone.ID] = append(dns.Records[zone.ID], records...) } - - for _, newRecord := range recordsMap { - dns.Records[zone.Name] = append(dns.Records[zone.Name], newRecord) - } - } if len(dns.Records) == 0 { @@ -265,82 +87,34 @@ func (dns *CFDNS) getRecords() (err error) { // UpdateRecords updates the records with the current ip func (dns *CFDNS) UpdateRecords(currentIP string) (updatedRecords map[string][]string, err error) { + if err := dns.getRecords(); err != nil { + return nil, err + } + zap.S().Info("Checking records") updatedRecords = make(map[string][]string) - for zoneName, records := range dns.Records { + for zoneID, records := range dns.Records { for i, record := range records { zap.S().Infof("Updating record %s", record.Name) - reqURL := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records/%s", record.ZoneID, record.ID) - payload := strings.NewReader(fmt.Sprintf(`{"content":"%s"}`, currentIP)) - req, err := createCFRequest(http.MethodPatch, reqURL, dns.Cfg.Email, dns.Cfg.AuthKey, payload) + newRecord, err := dns.Api.UpdateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(zoneID), cloudflare.UpdateDNSRecordParams{ID: record.ID, Content: currentIP}) if err != nil { - zap.S().Fatal(err) - return updatedRecords, err + return nil, err } - res, err := dns.HTTPClient.Do(req) - if err != nil { - zap.S().Fatal(err) - return updatedRecords, err - } + records[i] = newRecord - type ResponseBody struct { - Result Record `json:"result"` - Errors []Error `json:"errors"` - Messages []Message `json:"messages"` - Success bool `json:"success"` + var zoneName string + for _, zone := range dns.Zones { + if zone.ID == zoneID { + zoneName = zone.Name + } } - var resBody ResponseBody - err = unmarshalResponse(res.Body, &resBody) - if err != nil { - zap.S().Fatal(err) - return updatedRecords, err - } - res.Body.Close() - zap.S().Debugf("Response body: %+v", resBody) - - if !resBody.Success || res.StatusCode != http.StatusOK { - strErr := fmt.Sprintf("Error updating record %s. HTTP status code: %d. 
Response body: %v", record.Name, res.StatusCode, resBody) - return updatedRecords, errors.New(strErr) - } - - records[i] = resBody.Result - updatedRecords[zoneName] = append(updatedRecords[zoneName], record.Name) } } return updatedRecords, nil } - -// createCFRequest creates an HTTP request with the cloudflare headers -func createCFRequest(method, url, email, authKey string, body io.Reader) (req *http.Request, err error) { - req, err = http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - req.Header.Set("X-Auth-Email", email) - req.Header.Set("X-Auth-Key", authKey) - req.Header.Set("Content-Type", "application/json") - - return req, nil -} - -// unmarshalResponse unmarshals the response body into the given interface -func unmarshalResponse(reader io.Reader, v interface{}) (err error) { - bodyBytes, err := io.ReadAll(reader) - if err != nil { - return err - } - - err = json.Unmarshal(bodyBytes, v) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/dnsapi/cloudflare_test.go b/cmd/dnsapi/cloudflare_test.go index 29458a6..8d44de5 100644 --- a/cmd/dnsapi/cloudflare_test.go +++ b/cmd/dnsapi/cloudflare_test.go @@ -1,174 +1,99 @@ package dnsapi import ( - "bytes" - "io" + "encoding/json" "net/http" + "net/http/httptest" + "strconv" "testing" + "github.com/cloudflare/cloudflare-go" "github.com/daruzero/cloudflare-dns-auto-updater-go/internal/config" - "github.com/daruzero/cloudflare-dns-auto-updater-go/test/mocks" + "github.com/stretchr/testify/assert" "go.uber.org/zap" ) +var ( + // mux is the HTTP request multiplexer used with the test server. + mux *http.ServeMux + + // client is the API client being tested. + client *cloudflare.API + + // server is a test HTTP server used to provide mock API responses. + server *httptest.Server +) + func init() { logger, _ := zap.NewDevelopment() zap.ReplaceGlobals(logger) } -func TestDns_checkZoneIDs(t *testing.T) { - tests := []struct { - name string - mockResponse string - expectedZoneIDs []string - expectedNames []string - expectedZones int - wantErr bool - }{ - { - name: "ValidZoneIDs", - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testZoneID1", "name": "testZoneName1"}, {"id":"testZoneID2", "name": "testZoneName2"}, {"id":"testZoneID3", "name": "testZoneName"}]}`, - expectedZones: 2, - expectedZoneIDs: []string{"testZoneID1", "testZoneID2"}, - expectedNames: []string{"testZoneName1", "testZoneName2"}, - wantErr: false, - }, - { - name: "InvalidZoneIDs", - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testZoneID1", "name": "testZoneName1"}, {"id":"testZoneID4", "name": "testZoneName4"}, {"id":"testZoneID5", "name": "testZoneName5"}]}`, - expectedZones: 1, - expectedZoneIDs: []string{"testZoneID1"}, - expectedNames: []string{"testZoneName1"}, - wantErr: false, - }, - { - name: "NoZoneIDs", - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testZoneID3", "name": "testZoneName3"}]}`, - expectedZones: 0, - expectedZoneIDs: []string{}, - expectedNames: []string{}, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockClient := &mocks.MockClient{ - DoFunc: func(req *http.Request) (*http.Response, error) { - body := io.NopCloser(bytes.NewReader([]byte(tt.mockResponse))) - return &http.Response{ - StatusCode: 200, - Body: body, - }, nil - }, - } - - cfg := &config.Config{ - AuthKey: "testAuthKey", - Email: "testEmail", - ZoneIDs: []string{"testZoneID1", "testZoneID2"}, - } - - dns := &CFDNS{ - 
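Stripped of the surrounding plumbing, the rewritten cmd/dnsapi/cloudflare.go boils down to the four SDK calls this hunk already relies on: cloudflare.New, ListZones, ListDNSRecords and UpdateDNSRecord. A minimal standalone sketch of that flow (the credentials and target IP are placeholders, error handling reduced to log.Fatal) looks roughly like:

    package main

    import (
        "context"
        "log"

        "github.com/cloudflare/cloudflare-go"
    )

    func main() {
        ctx := context.Background()

        // Authenticate with a global API key and email, as New() now does.
        api, err := cloudflare.New("deadbeef", "cloudflare@example.org")
        if err != nil {
            log.Fatal(err)
        }

        // Fetch every zone visible to the credentials (New() then filters by ID/name).
        zones, err := api.ListZones(ctx)
        if err != nil {
            log.Fatal(err)
        }

        for _, zone := range zones {
            // Only A records are of interest, mirroring getRecords().
            records, _, err := api.ListDNSRecords(ctx, cloudflare.ZoneIdentifier(zone.ID),
                cloudflare.ListDNSRecordsParams{Type: "A"})
            if err != nil {
                log.Fatal(err)
            }

            // Point each record at the new address, mirroring UpdateRecords().
            for _, record := range records {
                _, err := api.UpdateDNSRecord(ctx, cloudflare.ZoneIdentifier(zone.ID),
                    cloudflare.UpdateDNSRecordParams{ID: record.ID, Content: "203.0.113.10"})
                if err != nil {
                    log.Fatal(err)
                }
            }
        }
    }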
Cfg: cfg, - HTTPClient: mockClient, - } - - err := dns.checkZoneIDs() - if (err != nil) != tt.wantErr { - t.Fatalf("CheckZoneIDs() error = %v, wantErr %v", err, tt.wantErr) - } +func setup() { + // test server + mux = http.NewServeMux() + server = httptest.NewServer(mux) - if len(dns.Zones) != tt.expectedZones { - t.Fatalf("CheckZoneIDs() = %d; want %d", len(dns.Zones), tt.expectedZones) - } + // disable rate limits and retries in testing - prepended so any provided value overrides this + opts := []cloudflare.Option{cloudflare.UsingRateLimit(100000), cloudflare.UsingRetryPolicy(0, 0, 0)} - for i, zone := range dns.Zones { - if zone.ID != tt.expectedZoneIDs[i] { - t.Errorf("CheckZoneIDs() = %s; want %s", zone.ID, tt.expectedZoneIDs[i]) - } + // Cloudflare client configured to use test server + client, _ = cloudflare.New("deadbeef", "cloudflare@example.org", opts...) + client.BaseURL = server.URL +} - if zone.Name != tt.expectedNames[i] { - t.Errorf("CheckZoneIDs() = %s; want %s", zone.Name, tt.expectedNames[i]) - } - } - }) - } +func teardown() { + server.Close() } -func TestDns_getZoneIDs(t *testing.T) { - tests := []struct { - name string - mockResponse string - expectedZoneIDs []string - expectedNames []string - expectedZones int - wantErr bool - }{ - { - name: "ValidZoneNames", - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testID", "name": "testZoneName"}, {"id":"testID2", "name": "testZoneName2"}]}`, - expectedZones: 2, - expectedZoneIDs: []string{"testID", "testID2"}, - expectedNames: []string{"testZoneName", "testZoneName2"}, - wantErr: false, - }, - { - name: "InvalidZoneNames", - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[]}`, - expectedZones: 0, - expectedZoneIDs: []string{}, - expectedNames: []string{}, - wantErr: true, - }, +func parsePage(t *testing.T, total int, s string) (int, bool) { + if s == "" { + return 1, true } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockClient := &mocks.MockClient{ - DoFunc: func(req *http.Request) (*http.Response, error) { - body := io.NopCloser(bytes.NewReader([]byte(tt.mockResponse))) - return &http.Response{ - StatusCode: 200, - Body: body, - }, nil - }, - } - - cfg := &config.Config{ - AuthKey: "testAuthKey", - Email: "testEmail", - ZoneNames: []string{"testZoneName", "testZoneName2"}, - } - - dns := &CFDNS{ - Cfg: cfg, - HTTPClient: mockClient, - } - - err := dns.getZoneIDs() - if (err != nil) != tt.wantErr { - t.Fatalf("GetZoneIDs() error = %v, wantErr %v", err, tt.wantErr) - } - - if len(dns.Zones) != tt.expectedZones { - t.Fatalf("GetZoneIDs() = %d; want %d", len(dns.Zones), tt.expectedZones) - } - - for i, zone := range dns.Zones { - if zone.ID != tt.expectedZoneIDs[i] { - t.Errorf("GetZoneIDs() = %s; want %s", zone.ID, tt.expectedZoneIDs[i]) - } + page, err := strconv.Atoi(s) + if !assert.NoError(t, err) { + return 0, false + } - if zone.Name != tt.expectedNames[i] { - t.Errorf("GetZoneIDs() = %s; want %s", zone.Name, tt.expectedNames[i]) - } - } - }) + if !assert.LessOrEqual(t, page, total) || !assert.GreaterOrEqual(t, page, 1) { + return 0, false } + + return page, true } func TestDns_getRecords(t *testing.T) { + setup() + defer teardown() + + handler := func(w http.ResponseWriter, r *http.Request) { + switch { + case !assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method): + return + case !assert.Equal(t, "50", r.URL.Query().Get("per_page")): + return + } + + page, ok := parsePage(t, totalPage, r.URL.Query().Get("page")) + if 
!ok { + return + } + + start := (page - 1) * 50 + + count := 50 + if page == totalPage { + count = total - start + } + + w.Header().Set("content-type", "application/json") + err := json.NewEncoder(w).Encode(mockZonesResponse(total, page, start, count)) + assert.NoError(t, err) + } + + mux.HandleFunc("/zones", handler) + tests := []struct { name string mockResponse string @@ -183,7 +108,6 @@ func TestDns_getRecords(t *testing.T) { { name: "NoRecordID", recordIDs: nil, - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testRecordID", "name": "testRecordName", "type": "A", "content": "testContent"}]}`, expectedRecordsMaps: 1, expectedRecordIDs: []string{"testRecordID"}, expectedNames: []string{"testRecordName"}, @@ -194,7 +118,6 @@ func TestDns_getRecords(t *testing.T) { { name: "WithRecordID", recordIDs: []string{"testRecordID"}, - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testRecordID", "name": "testRecordName", "type": "A", "content": "testContent"}]}`, expectedRecordsMaps: 1, expectedRecordIDs: []string{"testRecordID"}, expectedNames: []string{"testRecordName"}, @@ -205,7 +128,6 @@ func TestDns_getRecords(t *testing.T) { { name: "InvalidRecordID", recordIDs: []string{"testRecordID"}, - mockResponse: `{"success":true,"errors":[],"messages":[],"result":[{"id":"testRecordID2", "name": "testRecordName2", "type": "A", "content": "testContent2"}]}`, expectedRecordsMaps: 0, expectedRecordIDs: []string{}, expectedNames: []string{}, @@ -217,30 +139,19 @@ func TestDns_getRecords(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mockClient := &mocks.MockClient{ - DoFunc: func(req *http.Request) (*http.Response, error) { - body := io.NopCloser(bytes.NewReader([]byte(tt.mockResponse))) - return &http.Response{ - StatusCode: 200, - Body: body, - }, nil - }, - } - cfg := &config.Config{ - AuthKey: "testAuthKey", - Email: "testEmail", + AuthKey: "deadbeef", + Email: "test@example.org", RecordIDs: tt.recordIDs, + ZoneIDs: []string{"testZoneID"}, } - dns := &CFDNS{ - Cfg: cfg, - HTTPClient: mockClient, - Records: make(map[string][]Record), - Zones: []Zone{{ID: "testZoneID", Name: "testZoneName"}}, + dns, err := New(cfg) + if err != nil { + t.Fatal(err) } - err := dns.getRecords() + err = dns.getRecords() if (err != nil) != tt.wantErr { t.Fatalf("GetRecords() error = %v, wantErr %v", err, tt.wantErr) } @@ -276,116 +187,105 @@ func TestDns_getRecords(t *testing.T) { } } -func TestDns_UpdateRecord(t *testing.T) { - tests := []struct { - initialRecords map[string][]Record - expectedRecords map[string][]string - updatedRecords map[string][]Record - name string - updateIP string - mockResponse string - wantErr bool - }{ - { - name: "UpdateRecordSuccess", - initialRecords: map[string][]Record{ - "testZoneID": { - { - ID: "testRecordID", - Name: "testRecordName", - Type: "A", - Content: "testIPOld", - }, - }, - }, - updateIP: "testIPNew", - mockResponse: `{"success":true,"errors":[],"messages":[],"result":{"id":"testRecordID", "name": "testRecordName", "type": "A", "content": "testIPNew"}}`, - expectedRecords: map[string][]string{ - "testZoneID": {"testRecordName"}, - }, - updatedRecords: map[string][]Record{ - "testZoneID": { - { - ID: "testRecordID", - Name: "testRecordName", - Type: "A", - Content: "testIPNew", - }, - }, - }, - wantErr: false, - }, - { - name: "UpdateRecordFail", - initialRecords: map[string][]Record{ - "testZoneID": { - { - ID: "testRecordID", - Name: "testRecordName", - Type: "A", - Content: "testIPOld", - 
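The setup()/teardown() helpers introduced above follow the pattern cloudflare-go uses in its own test suite: start an httptest server, point a real client at it through BaseURL, and serve canned JSON envelopes from the mux. A trimmed, self-contained version of that pattern (single page, one fake zone, all values placeholders) would be:

    package dnsapi

    import (
        "context"
        "fmt"
        "net/http"
        "net/http/httptest"
        "testing"

        "github.com/cloudflare/cloudflare-go"
        "github.com/stretchr/testify/assert"
    )

    func TestListZonesAgainstMockServer(t *testing.T) {
        mux := http.NewServeMux()
        server := httptest.NewServer(mux)
        defer server.Close()

        // Same trick as setup(): a real SDK client whose BaseURL is the test server.
        client, err := cloudflare.New("deadbeef", "cloudflare@example.org",
            cloudflare.UsingRateLimit(100000), cloudflare.UsingRetryPolicy(0, 0, 0))
        if err != nil {
            t.Fatal(err)
        }
        client.BaseURL = server.URL

        mux.HandleFunc("/zones", func(w http.ResponseWriter, r *http.Request) {
            assert.Equal(t, http.MethodGet, r.Method)
            w.Header().Set("content-type", "application/json")
            // One page of one zone; result_info tells the client there is nothing more to fetch.
            fmt.Fprint(w, `{"success":true,"errors":[],"messages":[],
                "result":[{"id":"testZoneID","name":"0.example.com"}],
                "result_info":{"page":1,"per_page":50,"count":1,"total_count":1,"total_pages":1}}`)
        })

        zones, err := client.ListZones(context.Background())
        assert.NoError(t, err)
        if assert.NotEmpty(t, zones) {
            assert.Equal(t, "0.example.com", zones[0].Name)
        }
    }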
}, - }, - }, - updateIP: "testIPNew", - mockResponse: `{"success":false,"errors":[{"code":1004,"message":"DNS Validation Error","error_chain":[{"code":9003,"message":"Invalid IP","error_chain":[]}]}],"messages":[],"result":null}`, - expectedRecords: map[string][]string{}, - updatedRecords: map[string][]Record{}, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockClient := &mocks.MockClient{ - DoFunc: func(req *http.Request) (*http.Response, error) { - body := io.NopCloser(bytes.NewReader([]byte(tt.mockResponse))) - return &http.Response{ - StatusCode: 200, - Body: body, - }, nil - }, - } - - cfg := &config.Config{ - AuthKey: "testAuthKey", - Email: "testEmail", - } - - dns := &CFDNS{ - Cfg: cfg, - HTTPClient: mockClient, - Records: tt.initialRecords, - } - - updatedRecords, err := dns.UpdateRecords(tt.updateIP) - if (err != nil) != tt.wantErr { - t.Fatalf("UpdateRecords() error = %v, wantErr %v", err, tt.wantErr) - } - - for zoneID, updatedZoneRecords := range tt.expectedRecords { - if len(updatedRecords[zoneID]) != len(updatedZoneRecords) { - t.Fatalf("UpdateRecords() = %d; want %d", len(updatedRecords[zoneID]), len(updatedZoneRecords)) - } - - for i, expectedRecordName := range updatedZoneRecords { - if updatedRecords[zoneID][i] != expectedRecordName { - t.Errorf("UpdateRecords() = %s; want %s", updatedRecords[zoneID][i], expectedRecordName) - } - } - } - - for zoneID, updatedZoneRecords := range tt.updatedRecords { - if len(dns.Records[zoneID]) != len(updatedZoneRecords) { - t.Fatalf("UpdateRecords() = %d; want %d", len(dns.Records[zoneID]), len(updatedZoneRecords)) - } - - for i, expectedRecord := range updatedZoneRecords { - if dns.Records[zoneID][i] != expectedRecord { - t.Errorf("UpdateRecords() = %v; want %v", dns.Records[zoneID][i], expectedRecord) - } - } - } - }) - } -} +// func TestDns_UpdateRecord(t *testing.T) { +// tests := []struct { +// initialRecords map[string][]cloudflare.DNSRecord +// expectedRecords map[string][]string +// updatedRecords map[string][]cloudflare.DNSRecord +// name string +// updateIP string +// mockResponse string +// wantErr bool +// }{ +// { +// name: "UpdateRecordSuccess", +// initialRecords: map[string][]cloudflare.DNSRecord{ +// "testZoneID": { +// { +// ID: "testRecordID", +// Name: "testRecordName", +// Type: "A", +// Content: "testIPOld", +// }, +// }, +// }, +// updateIP: "testIPNew", +// mockResponse: `{"success":true,"errors":[],"messages":[],"result":{"id":"testRecordID", "name": "testRecordName", "type": "A", "content": "testIPNew"}}`, +// expectedRecords: map[string][]string{ +// "testZoneID": {"testRecordName"}, +// }, +// updatedRecords: map[string][]cloudflare.DNSRecord{ +// "testZoneID": { +// { +// ID: "testRecordID", +// Name: "testRecordName", +// Type: "A", +// Content: "testIPNew", +// }, +// }, +// }, +// wantErr: false, +// }, +// { +// name: "UpdateRecordFail", +// initialRecords: map[string][]cloudflare.DNSRecord{ +// "testZoneID": { +// { +// ID: "testRecordID", +// Name: "testRecordName", +// Type: "A", +// Content: "testIPOld", +// }, +// }, +// }, +// updateIP: "testIPNew", +// mockResponse: `{"success":false,"errors":[{"code":1004,"message":"DNS Validation Error","error_chain":[{"code":9003,"message":"Invalid IP","error_chain":[]}]}],"messages":[],"result":null}`, +// expectedRecords: map[string][]string{}, +// updatedRecords: map[string][]cloudflare.DNSRecord{}, +// wantErr: true, +// }, +// } +// +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// 
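While this update test is parked, one detail worth flagging: the handler above reads totalPage and total, which are not defined in this hunk, and the mock helpers added to test/mocks/client.go later in the diff are unexported and refer to Zone, Owner, ZonePlan and friends without a package qualifier, so as shown they do not look reachable (or compilable) from package dnsapi. If the intent is to share them, an exported variant pinned to the cloudflare-go types, trimmed to the fields these tests actually read, could look like:

    package mocks

    import "github.com/cloudflare/cloudflare-go"

    // MockZonesResponse builds a single-page /zones envelope with one zone per
    // name. Exported, and qualified with the cloudflare package, so package
    // dnsapi can use it from its test handlers.
    func MockZonesResponse(names ...string) *cloudflare.ZonesResponse {
        zones := make([]cloudflare.Zone, len(names))
        for i, name := range names {
            zones[i] = cloudflare.Zone{ID: name + "-id", Name: name}
        }

        return &cloudflare.ZonesResponse{
            Result: zones,
            ResultInfo: cloudflare.ResultInfo{
                Page:       1,
                PerPage:    50,
                TotalPages: 1,
                Count:      len(zones),
                Total:      len(zones),
            },
            Response: cloudflare.Response{
                Success:  true,
                Errors:   []cloudflare.ResponseInfo{},
                Messages: []cloudflare.ResponseInfo{},
            },
        }
    }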
cfg := &config.Config{ +// AuthKey: "deadbeef", +// Email: "test@example.org", +// } +// +// dns := &CFDNS{ +// Cfg: cfg, +// Records: tt.initialRecords, +// } +// +// updatedRecords, err := dns.UpdateRecords(tt.updateIP) +// if (err != nil) != tt.wantErr { +// t.Fatalf("UpdateRecords() error = %v, wantErr %v", err, tt.wantErr) +// } +// +// for zoneID, updatedZoneRecords := range tt.expectedRecords { +// if len(updatedRecords[zoneID]) != len(updatedZoneRecords) { +// t.Fatalf("UpdateRecords() = %d; want %d", len(updatedRecords[zoneID]), len(updatedZoneRecords)) +// } +// +// for i, expectedRecordName := range updatedZoneRecords { +// if updatedRecords[zoneID][i] != expectedRecordName { +// t.Errorf("UpdateRecords() = %s; want %s", updatedRecords[zoneID][i], expectedRecordName) +// } +// } +// } +// +// for zoneID, updatedZoneRecords := range tt.updatedRecords { +// if len(dns.Records[zoneID]) != len(updatedZoneRecords) { +// t.Fatalf("UpdateRecords() = %d; want %d", len(dns.Records[zoneID]), len(updatedZoneRecords)) +// } +// +// for i, expectedRecord := range updatedZoneRecords { +// if dns.Records[zoneID][i].Content != expectedRecord.Content { +// t.Errorf("UpdateRecords() = %v; want %v", dns.Records[zoneID][i], expectedRecord) +// } +// } +// } +// }) +// } +// } diff --git a/go.mod b/go.mod index 9ac0a2a..be660af 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,25 @@ module github.com/daruzero/cloudflare-dns-auto-updater-go go 1.23 require ( + github.com/cloudflare/cloudflare-go v0.104.0 + github.com/stretchr/testify v1.9.0 go.elastic.co/ecszap v1.0.2 go.uber.org/zap v1.27.0 ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.15.0 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - golang.org/x/sys v0.6.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) require ( diff --git a/go.sum b/go.sum index 2d27d7c..05402bf 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/cloudflare/cloudflare-go v0.104.0 h1:R/lB0dZupaZbOgibAH/BRrkFbZ6Acn/WsKg2iX2xXuY= +github.com/cloudflare/cloudflare-go v0.104.0/go.mod h1:pfUQ4PIG4ISI0/Mmc21Bp86UnFU0ktmPf3iTgbSL+cM= github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec= github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -10,17 +12,26 @@ github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf 
v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -35,6 +46,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -42,8 +55,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.elastic.co/ecszap v1.0.2 h1:iW5OGx8IiokiUzx/shD4AJCPFMC9uUtr7ycaiEIU++I= go.elastic.co/ecszap v1.0.2/go.mod h1:dJkSlK3BTiwG/qXhCwe50Mz/jwu854vSip8sIeQhNZg= @@ -65,8 +79,8 @@ golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -75,26 +89,31 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/mocks/client.go b/test/mocks/client.go index daec000..5ed467e 100644 --- a/test/mocks/client.go +++ b/test/mocks/client.go @@ -1,6 +1,9 @@ package mocks -import "net/http" +import ( + "fmt" + "net/http" +) type MockClient struct { DoFunc func(req *http.Request) (*http.Response, error) @@ -9,3 +12,120 @@ type MockClient struct { func (m *MockClient) Do(req *http.Request) (*http.Response, error) { return m.DoFunc(req) } + +func mockZone(i int) *Zone { + zoneName := fmt.Sprintf("%d.example.com", i) + ownerName := "Test Account" + + return &Zone{ + ID: mockID(zoneName), + Name: zoneName, + DevMode: 0, + OriginalNS: []string{ + "linda.ns.cloudflare.com", + "merlin.ns.cloudflare.com", + }, + OriginalRegistrar: "cloudflare, inc. (id: 1910)", + OriginalDNSHost: "", + CreatedOn: mustParseTime("2021-07-28T05:06:20.736244Z"), + ModifiedOn: mustParseTime("2021-07-28T05:06:20.736244Z"), + NameServers: []string{ + "abby.ns.cloudflare.com", + "noel.ns.cloudflare.com", + }, + Owner: Owner{ + ID: mockID(ownerName), + Email: "", + Name: ownerName, + OwnerType: "organization", + }, + Permissions: []string{ + "#access:read", + "#analytics:read", + "#auditlogs:read", + "#billing:read", + "#dns_records:read", + "#lb:read", + "#legal:read", + "#logs:read", + "#member:read", + "#organization:read", + "#ssl:read", + "#stream:read", + "#subscription:read", + "#waf:read", + "#webhooks:read", + "#worker:read", + "#zone:read", + "#zone_settings:read", + }, + Plan: ZonePlan{ + ZonePlanCommon: ZonePlanCommon{ + ID: "0feeeeeeeeeeeeeeeeeeeeeeeeeeeeee", + Name: "Free Website", + Currency: "USD", + }, + IsSubscribed: false, + CanSubscribe: false, + LegacyID: "free", + LegacyDiscount: false, + ExternallyManaged: false, + }, + PlanPending: ZonePlan{ + ZonePlanCommon: ZonePlanCommon{ + ID: "", + }, + IsSubscribed: false, + CanSubscribe: false, + LegacyID: "", + LegacyDiscount: false, + ExternallyManaged: false, + }, + Status: "active", + Paused: false, + Type: "full", + Host: struct { + Name string + Website string + }{ + Name: "", + Website: "", + }, + VanityNS: nil, + Betas: nil, + DeactReason: "", + Meta: ZoneMeta{ + PageRuleQuota: 3, + WildcardProxiable: false, + PhishingDetected: false, + }, + Account: Account{ + ID: mockID(ownerName), + Name: ownerName, + }, + VerificationKey: "", + } +} + +func mockZonesResponse(total, page, start, count int) *ZonesResponse { + zones := make([]Zone, count) + for i := range zones { + zones[i] = *mockZone(start + i) + } + + return &ZonesResponse{ + Result: zones, + 
ResultInfo: ResultInfo{ + Page: page, + PerPage: 50, + TotalPages: (total + 49) / 50, + Count: count, + Total: total, + }, + Response: Response{ + Success: true, + Errors: []ResponseInfo{}, + Messages: []ResponseInfo{}, + }, + } +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/.gitignore b/vendor/github.com/cloudflare/cloudflare-go/.gitignore new file mode 100644 index 0000000..0953e13 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/.gitignore @@ -0,0 +1,6 @@ +.idea +.vscode/ +cmd/flarectl/dist/ +cmd/flarectl/flarectl +cmd/flarectl/flarectl.exe +.flox/ diff --git a/vendor/github.com/cloudflare/cloudflare-go/.golintci.yaml b/vendor/github.com/cloudflare/cloudflare-go/.golintci.yaml new file mode 100644 index 0000000..d5d1c93 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/.golintci.yaml @@ -0,0 +1,33 @@ +run: + timeout: 5m + issues-exit-code: 1 + tests: true + skip-dirs-use-default: true + modules-download-mode: readonly + +linters: + enable: + - bodyclose # ensure HTTP response bodies are successfully closed. + - contextcheck # check we are passing context an inherited context. + - gofmt # checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification. + - errname # checks that sentinel errors are prefixed with the `Err`` and error types are suffixed with the `Error``. + - errorlint # used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - godot # check if comments end in a period. + - misspell # finds commonly misspelled English words in comments. + - nilerr # checks that there is no simultaneous return of nil error and an invalid value. + - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes. + - unparam # reports unused function parameters. + - whitespace # detection of leading and trailing whitespace. + - gosec # inspects source code for security problems. + - bidichk # checks for dangerous unicode character sequences. + - exportloopref # prevent scope issues with pointers in loops. + - goconst # use constants where values are repeated. + - reassign # checks that package variables are not reassigned. + - goimports # checks import grouping and code formatting. + +output: + format: colored-line-number + +linters-settings: + goconst: + ignore-tests: true diff --git a/vendor/github.com/cloudflare/cloudflare-go/.semgrep.yml b/vendor/github.com/cloudflare/cloudflare-go/.semgrep.yml new file mode 100644 index 0000000..e0a6dc4 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/.semgrep.yml @@ -0,0 +1,88 @@ +rules: + - id: rfc-5737-ip-address + languages: + - go + message: Where a real IPv4 address isn't needed, use IPv4 addresses from RFC5737. + paths: + include: + - '*.go' + patterns: + - pattern-regex: '(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)' + - pattern-not-regex: '10\.\d+\.\d+.\d+' + - pattern-not-regex: '192\.168\.\d+.\d+' + - pattern-not-regex: '192\.0\.2\.\d+' # 192.0.2.0/24 (TEST-NET-1, rfc5737) + - pattern-not-regex: '198\.51\.100\.\d+' # 198.51.100.0/24 (TEST-NET-2, rfc5737) + - pattern-not-regex: '203\.0\.113\.\d+' # 203.0.113.0/24 (TEST-NET-3, rfc5737) + severity: WARNING + - id: rfc-3849-ip-address + languages: + - generic + message: Where a real IPv6 address isn't needed, use IPv6 addresses from RFC3849. 
+ paths: + include: + - '*.go' + patterns: + - pattern-regex: '(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))' + severity: WARNING + + - id: calling-errors-wrap + languages: [go] + message: 'Do not call `errors.Wrap`. Use `fmt.Errorf(".. : %w", err)` instead.' + patterns: + - pattern-either: + - pattern: | + errors.Wrap(...) + severity: WARNING + - id: no-use-of-stdlib-json + languages: + - go + message: Favour "github.com/goccy/go-json" instead of "encoding/json" to allow better customisation of marshaling and unmarshaling JSON attributes. See https://github.com/cloudflare/cloudflare-go/pull/1360 for full details. + paths: + include: + - '*.go' + patterns: + - pattern-regex: '\"encoding\/json\"' + fix-regex: + regex: \".*\" + replacement: '"github.com/goccy/go-json"' + severity: WARNING + - id: use-pointers-for-booleans + languages: + - go + message: | + Booleans should always be represented as pointers in structs with an omitempty marshaling tag (most commonly as JSON). This ensures you can determine unset, false and truthy values. + paths: + include: + - '*.go' + patterns: + - pattern-regex: "bool" + - pattern-not-regex: "\\*bool" + - pattern-either: + - pattern-inside: | + type $STRUCTNAME struct { + ... + } + severity: WARNING + metadata: + references: + - https://github.com/cloudflare/cloudflare-go/blob/master/docs/conventions.md#booleans + - id: use-pointers-for-time + languages: + - go + message: | + time.Time should always be represented as pointers in structs. + paths: + include: + - '*.go' + patterns: + - pattern-regex: "time\\.Time" + - pattern-not-regex: "\\*time\\.Time" + - pattern-either: + - pattern-inside: | + type $STRUCTNAME struct { + ... + } + severity: WARNING + metadata: + references: + - https://github.com/cloudflare/cloudflare-go/blob/master/docs/conventions.md#timetime diff --git a/vendor/github.com/cloudflare/cloudflare-go/.semgrepignore b/vendor/github.com/cloudflare/cloudflare-go/.semgrepignore new file mode 100644 index 0000000..e12718f --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/.semgrepignore @@ -0,0 +1,22 @@ +# Based on the default .semgrepignore documented here: + +# https://semgrep.dev/docs/ignoring-files-folders-code/#understanding-semgrep-defaults + +# but not ignoring '\*\_test.go'. 
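The .semgrep.yml vendored above is cloudflare-go's own lint config rather than anything enforced on this repository, but the conventions its rules describe are easy to show in a few lines of Go (the type and values below are illustrative only): wrap errors with fmt.Errorf and %w instead of errors.Wrap, keep booleans and time.Time behind pointers with omitempty, prefer goccy/go-json over encoding/json, and use RFC 5737 TEST-NET addresses in examples.

    package main

    import (
        "fmt"
        "time"

        json "github.com/goccy/go-json" // no-use-of-stdlib-json: preferred over encoding/json
    )

    // rfc-5737-ip-address: examples should use TEST-NET ranges, not real addresses.
    const exampleIP = "192.0.2.1"

    // use-pointers-for-booleans / use-pointers-for-time: pointers let callers
    // distinguish "unset" from false or the zero time when omitempty is in play.
    type ExampleSetting struct {
        Enabled    *bool      `json:"enabled,omitempty"`
        ModifiedOn *time.Time `json:"modified_on,omitempty"`
    }

    // calling-errors-wrap: wrap with %w so errors.Is and errors.As keep working.
    func wrapUpdateErr(err error) error {
        return fmt.Errorf("updating record %s: %w", exampleIP, err)
    }

    func main() {
        enabled := true
        now := time.Now()
        out, _ := json.Marshal(ExampleSetting{Enabled: &enabled, ModifiedOn: &now})
        fmt.Println(string(out))
        fmt.Println(wrapUpdateErr(fmt.Errorf("boom")))
    }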
+ +# Ignore git items + +.gitignore +.git/ +:include .gitignore + +.semgrep +.semgrep_logs/ + +.github/ +.vscode/ +.changelog/ +CHANGELOG.md +go.mod +go.sum +README.md diff --git a/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md b/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md new file mode 100644 index 0000000..ee95151 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md @@ -0,0 +1,1233 @@ +## 0.105.0 (Unreleased) + +## 0.104.0 (September 11th, 2024) + +BREAKING CHANGES: + +* dns: removed deprecated `ZoneID` and `ZoneName` fields ([#2986](https://github.com/cloudflare/cloudflare-go/issues/2986)) + +ENHANCEMENTS: + +* bot_management: add ai_bots_protection to public API ([#2974](https://github.com/cloudflare/cloudflare-go/issues/2974)) + +DEPENDENCIES: + +* deps: bumps golang.org/x/net from 0.28.0 to 0.29.0 ([#3030](https://github.com/cloudflare/cloudflare-go/issues/3030)) +* deps: bumps golang.org/x/time from 0.5.0 to 0.6.0 ([#2816](https://github.com/cloudflare/cloudflare-go/issues/2816)) + +## 0.103.0 (August 28th, 2024) + +ENHANCEMENTS: + +* errors: implement the Unwrap method for custom error types to access the wrapped errors via errors.Is and errors.As ([#2857](https://github.com/cloudflare/cloudflare-go/issues/2857)) +* rulesets: add "contains" field to custom cache key header ([#2935](https://github.com/cloudflare/cloudflare-go/issues/2935)) +* teams_certificates: renamed `enabled` to `in_use` ([#2937](https://github.com/cloudflare/cloudflare-go/issues/2937)) +* waiting_room: add support for `enabled_origin_commands` ([#2931](https://github.com/cloudflare/cloudflare-go/issues/2931)) + +DEPENDENCIES: + +* deps: bumps github.com/urfave/cli/v2 from 2.27.3 to 2.27.4 ([#2863](https://github.com/cloudflare/cloudflare-go/issues/2863)) + +## 0.102.0 (August 14th, 2024) + +ENHANCEMENTS: + +* rulesets: Add `DeleteRulesetRule` ([#2833](https://github.com/cloudflare/cloudflare-go/issues/2833)) +* rulesets: Export `DeleteRulesetRuleParams` fields ([#2886](https://github.com/cloudflare/cloudflare-go/issues/2886)) +* teams_accounts: Add `disable_for_time` attribute ([#2797](https://github.com/cloudflare/cloudflare-go/issues/2797)) + +DEPENDENCIES: + +* deps: bumps golang.org/x/net from 0.27.0 to 0.28.0 ([#2835](https://github.com/cloudflare/cloudflare-go/issues/2835)) + +## 0.101.0 (July 31st, 2024) + +ENHANCEMENTS: + +* access_application: add `skip_app_launcher_login_page` flag to skip the App Launcher landing page ([#2793](https://github.com/cloudflare/cloudflare-go/issues/2793)) +* device_posture_rule: support extended_key_usage, check_private_key, and locations for client_certificate_v2 posture rule ([#1685](https://github.com/cloudflare/cloudflare-go/issues/1685)) +* devices_policy: Add new tunnel_protocol field to policy ([#2778](https://github.com/cloudflare/cloudflare-go/issues/2778)) +* risk_score_integration: Add support for Risk Score Integrations ([#2786](https://github.com/cloudflare/cloudflare-go/issues/2786)) + +DEPENDENCIES: + +* deps: bumps github.com/urfave/cli/v2 from 2.27.2 to 2.27.3 ([#2787](https://github.com/cloudflare/cloudflare-go/issues/2787)) + +## 0.100.0 (July 18th, 2024) + +BREAKING CHANGES: + +* teams_accounts: rename `TeamsCertificate` in `TeamsAccountConfiguration` to `TeamsCertificateSetting` ([#2754](https://github.com/cloudflare/cloudflare-go/issues/2754)) + +ENHANCEMENTS: + +* Add CloudConnectorAPI Client ([#2698](https://github.com/cloudflare/cloudflare-go/issues/2698)) +* gateway_categories: add ListGatewayCategories 
which returns all gateway categories. ([#2722](https://github.com/cloudflare/cloudflare-go/issues/2722)) +* teams_certificates: add `TeamsCertificate` resource to manage gateway certificates ([#2754](https://github.com/cloudflare/cloudflare-go/issues/2754)) + +DEPENDENCIES: + +* deps: bumps dependabot/fetch-metadata from 2.1.0 to 2.2.0 ([#2727](https://github.com/cloudflare/cloudflare-go/issues/2727)) +* deps: bumps golang.org/x/net from 0.26.0 to 0.27.0 ([#2726](https://github.com/cloudflare/cloudflare-go/issues/2726)) + +## 0.99.0 (July 3rd, 2024) + +ENHANCEMENTS: + +* teams: added per account certificate setting to teams gateway configuration ([#2713](https://github.com/cloudflare/cloudflare-go/issues/2713)) +* teams_list: Added description to ZT list item ([#2621](https://github.com/cloudflare/cloudflare-go/issues/2621)) +* teams_rules: Added ZT rule settings `ignore_cname_category_matches` ([#2621](https://github.com/cloudflare/cloudflare-go/issues/2621)) + +DEPENDENCIES: + +* deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.7 ([#2699](https://github.com/cloudflare/cloudflare-go/issues/2699)) + +## 0.98.0 (June 19th, 2024) + +ENHANCEMENTS: + +* access_application: Add support for SaaS OIDC Access Token Lifetime ([#2455](https://github.com/cloudflare/cloudflare-go/issues/2455)) + +DEPENDENCIES: + +* deps: bumps golang.org/x/net from 0.25.0 to 0.26.0 ([#2364](https://github.com/cloudflare/cloudflare-go/issues/2364)) +* deps: bumps goreleaser/goreleaser-action from 5.1.0 to 6.0.0 ([#2365](https://github.com/cloudflare/cloudflare-go/issues/2365)) + +## 0.97.0 (June 5th, 2024) + +ENHANCEMENTS: + +* access_application: Add support for Hybrid/Implicit flows and options ([#2131](https://github.com/cloudflare/cloudflare-go/issues/2131)) +* teams_account: Add Zero Trust connectivity settings ([#2165](https://github.com/cloudflare/cloudflare-go/issues/2165)) +* teams_accounts: Add `use_zt_virtual_ip` attribute ([#2126](https://github.com/cloudflare/cloudflare-go/issues/2126)) + +DEPENDENCIES: + +* deps: bumps `github.com/goccy/go-json` from 0.10.2 to 0.10.3 ([#2107](https://github.com/cloudflare/cloudflare-go/issues/2107)) +* deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.6 to 0.7.7 ([#2249](https://github.com/cloudflare/cloudflare-go/issues/2249)) + +## 0.96.0 (May 22nd, 2024) + +ENHANCEMENTS: + +* access_application: Add Refresh Token, Custom Claims, and PKCE Without Client Secret support for OIDC SaaS configurations ([#1981](https://github.com/cloudflare/cloudflare-go/issues/1981)) +* ruleset: add support for action parameters `fonts` and `disable_rum` ([#1832](https://github.com/cloudflare/cloudflare-go/issues/1832)) + +DEPENDENCIES: + +* deps: bumps bflad/action-milestone-comment from 1 to 2 ([#1991](https://github.com/cloudflare/cloudflare-go/issues/1991)) +* deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.6 ([#1993](https://github.com/cloudflare/cloudflare-go/issues/1993)) +* deps: bumps goreleaser/goreleaser-action from 5.0.0 to 5.1.0 ([#1992](https://github.com/cloudflare/cloudflare-go/issues/1992)) + +## 0.95.0 (May 8th, 2024) + +ENHANCEMENTS: + +* access_application: add support for `policies` array ([#1956](https://github.com/cloudflare/cloudflare-go/issues/1956)) +* access_application: add support for `scim_config` ([#1921](https://github.com/cloudflare/cloudflare-go/issues/1921)) +* access_policy: add support for reusable policies ([#1956](https://github.com/cloudflare/cloudflare-go/issues/1956)) +* dlp: add support for zt risk 
behavior configuration ([#1887](https://github.com/cloudflare/cloudflare-go/issues/1887)) + +BUG FIXES: + +* access_application: fix scim configuration authentication json marshalling ([#1959](https://github.com/cloudflare/cloudflare-go/issues/1959)) + +DEPENDENCIES: + +* deps: bumps dependabot/fetch-metadata from 2.0.0 to 2.1.0 ([#1839](https://github.com/cloudflare/cloudflare-go/issues/1839)) +* deps: bumps github.com/urfave/cli/v2 from 2.27.1 to 2.27.2 ([#1861](https://github.com/cloudflare/cloudflare-go/issues/1861)) +* deps: bumps golang.org/x/net from 0.24.0 to 0.25.0 ([#1974](https://github.com/cloudflare/cloudflare-go/issues/1974)) +* deps: bumps golangci/golangci-lint-action from 4 to 5 ([#1845](https://github.com/cloudflare/cloudflare-go/issues/1845)) +* deps: bumps golangci/golangci-lint-action from 5 to 6 ([#1975](https://github.com/cloudflare/cloudflare-go/issues/1975)) + +## 0.94.0 (April 24th, 2024) + +ENHANCEMENTS: + +* access_application: support options_preflight_bypass for access_application ([#1790](https://github.com/cloudflare/cloudflare-go/issues/1790)) +* gateway: added ecs_support field to teams_location resource ([#1826](https://github.com/cloudflare/cloudflare-go/issues/1826)) +* teams_account: adds custom certificate setting to teams account configuration ([#1811](https://github.com/cloudflare/cloudflare-go/issues/1811)) +* workers: support deleting namespaced Workers ([#1737](https://github.com/cloudflare/cloudflare-go/issues/1737)) + +DEPENDENCIES: + +* deps: bumps golang.org/x/net from 0.19.0 to 0.23.0 ([#1825](https://github.com/cloudflare/cloudflare-go/issues/1825)) + +## 0.93.0 (April 10th, 2024) + +BREAKING CHANGES: + +* dns: Remove "locked" flag which is always false ([#1618](https://github.com/cloudflare/cloudflare-go/issues/1618)) + +ENHANCEMENTS: + +* magic_transit_ipsec_tunnel: Adds support for replay_protection boolean flag ([#1710](https://github.com/cloudflare/cloudflare-go/issues/1710)) + +DEPENDENCIES: + +* deps: bumps golang.org/x/net from 0.22.0 to 0.24.0 ([#1688](https://github.com/cloudflare/cloudflare-go/issues/1688)) + +## 0.92.0 (March 27th, 2024) + +ENHANCEMENTS: + +- dlp: Adds support for ocr_enabled boolean flag ([#1600](https://github.com/cloudflare/cloudflare-go/issues/1600)) + +BUG FIXES: + +- teams_rules: add "resolve" to allowable actions ([#1615](https://github.com/cloudflare/cloudflare-go/issues/1615)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.6.0 to 1.7.0 ([#1593](https://github.com/cloudflare/cloudflare-go/issues/1593)) +- deps: bumps dependabot/fetch-metadata from 1.7.0 to 2.0.0 ([#1607](https://github.com/cloudflare/cloudflare-go/issues/1607)) + +## 0.91.0 (March 22nd, 2024) + +ENHANCEMENTS: + +- access_application: add support for `saml_attribute_transform_jsonata` in saas apps ([#1562](https://github.com/cloudflare/cloudflare-go/issues/1562)) +- dlp: Adds support for ocr_enabled boolean flag ([#1600](https://github.com/cloudflare/cloudflare-go/issues/1600)) + +BUG FIXES: + +- teams_rules: add "resolve" to allowable actions ([#1615](https://github.com/cloudflare/cloudflare-go/issues/1615)) + +DEPENDENCIES: + +- deps: bumps actions/checkout from 2 to 4 ([#1573](https://github.com/cloudflare/cloudflare-go/issues/1573)) +- deps: bumps dependabot/fetch-metadata from 1.6.0 to 1.7.0 ([#1593](https://github.com/cloudflare/cloudflare-go/issues/1593)) +- deps: bumps dependabot/fetch-metadata from 1.7.0 to 2.0.0 ([#1607](https://github.com/cloudflare/cloudflare-go/issues/1607)) +- deps: bumps 
google.golang.org/protobuf from 1.28.0 to 1.33.0 ([#1558](https://github.com/cloudflare/cloudflare-go/issues/1558)) + +## 0.90.0 (March 13th, 2024) + +ENHANCEMENTS: + +- access_mutual_tls_certificates: add support for mutual tls hostname settings ([#1516](https://github.com/cloudflare/cloudflare-go/issues/1516)) +- device_posture_rule: support last_seen and state for crowdstrike_s2s posture rule ([#1509](https://github.com/cloudflare/cloudflare-go/issues/1509)) +- dlp: add support for Context Awareness in DLP profiles ([#1497](https://github.com/cloudflare/cloudflare-go/issues/1497)) +- workers: Add Workers for Platforms support for getting a Worker, content and bindings ([#1508](https://github.com/cloudflare/cloudflare-go/issues/1508)) +- workers_for_platforms: Add ability to list Workers for Platforms namespaces, get a namespace, create a new namespace or delete a namespace. ([#1508](https://github.com/cloudflare/cloudflare-go/issues/1508)) + +BUG FIXES: + +- dlp: added optional ContextAwareness support ([#1510](https://github.com/cloudflare/cloudflare-go/issues/1510)) + +DEPENDENCIES: + +- deps: bumps github.com/stretchr/testify from 1.8.4 to 1.9.0 ([#1511](https://github.com/cloudflare/cloudflare-go/issues/1511)) +- deps: bumps golang.org/x/net from 0.21.0 to 0.22.0 ([#1513](https://github.com/cloudflare/cloudflare-go/issues/1513)) + +## 0.89.0 (February 28th, 2024) + +NOTES: + +- zaraz: replace deprecated neoEvents with Actions on Zaraz Config tools schema ([#1490](https://github.com/cloudflare/cloudflare-go/issues/1490)) + +ENHANCEMENTS: + +- magic-transit: Adds IPsec tunnel healthcheck direction & rate parameters ([#1503](https://github.com/cloudflare/cloudflare-go/issues/1503)) + +BUG FIXES: + +- registrar: Fix request method to call domain list endpoint from POST to GET ([#1506](https://github.com/cloudflare/cloudflare-go/issues/1506)) + +## 0.88.0 (February 14th, 2024) + +ENHANCEMENTS: + +- access_application: Add support for OIDC SaaS Applications ([#1500](https://github.com/cloudflare/cloudflare-go/issues/1500)) +- access_application: Add support for `allow_authenticate_via_warp` ([#1496](https://github.com/cloudflare/cloudflare-go/issues/1496)) +- access_application: add support for `name_id_transform_jsonata` in saas apps ([#1505](https://github.com/cloudflare/cloudflare-go/issues/1505)) +- access_organization: Add support for `allow_authenticate_via_warp` and `warp_auth_session_duration` ([#1496](https://github.com/cloudflare/cloudflare-go/issues/1496)) +- hyperdrive: Add support for hyperdrive CRUD operations ([#1492](https://github.com/cloudflare/cloudflare-go/issues/1492)) +- images_variants: Add support for Images Variants CRUD operations ([#1494](https://github.com/cloudflare/cloudflare-go/issues/1494)) +- teams_rules: `AntiVirus` settings includes notification settings ([#1499](https://github.com/cloudflare/cloudflare-go/issues/1499)) + +BUG FIXES: + +- hyperdrive: password should be nested in origin ([#1501](https://github.com/cloudflare/cloudflare-go/issues/1501)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/net from 0.20.0 to 0.21.0 ([#1502](https://github.com/cloudflare/cloudflare-go/issues/1502)) +- deps: bumps golangci/golangci-lint-action from 3 to 4 ([#1504](https://github.com/cloudflare/cloudflare-go/issues/1504)) + +## 0.87.0 (January 31st, 2024) + +ENHANCEMENTS: + +- access_seats: Add `UpdateAccessUsersSeats` with an array as input for multiple operations ([#1480](https://github.com/cloudflare/cloudflare-go/issues/1480)) +- dlp: add support for EDM and CWL 
datasets ([#1485](https://github.com/cloudflare/cloudflare-go/issues/1485)) +- logpush: Add support for Output Options ([#1468](https://github.com/cloudflare/cloudflare-go/issues/1468)) +- pages_project: Add `build_caching` attribute ([#1489](https://github.com/cloudflare/cloudflare-go/issues/1489)) +- streams: adds support for stream creation parameters when initiating a tus upload ([#1386](https://github.com/cloudflare/cloudflare-go/issues/1386)) +- teams_accounts: add support for extended email matching ([#1486](https://github.com/cloudflare/cloudflare-go/issues/1486)) + +BUG FIXES: + +- access_seats: UpdateAccessUserSeat: fix parameters not being sent as an array to the API, which caused an error when updating a user's seat ([#1480](https://github.com/cloudflare/cloudflare-go/issues/1480)) +- access_users: ListAccessUsers was returning wrong values in pointer fields due to a variable being misused in the loop ([#1482](https://github.com/cloudflare/cloudflare-go/issues/1482)) +- flarectl: alias zone certs to "ct" instead of duplicating the "c" alias ([#1484](https://github.com/cloudflare/cloudflare-go/issues/1484)) + +DEPENDENCIES: + +- deps: bumps actions/cache from 3 to 4 ([#1483](https://github.com/cloudflare/cloudflare-go/issues/1483)) + +## 0.86.0 (January 17, 2024) + +ENHANCEMENTS: + +- access_application: Add support for default_relay_state in saas apps ([#1477](https://github.com/cloudflare/cloudflare-go/issues/1477)) +- zaraz: Add support for CRUD APIs ([#1474](https://github.com/cloudflare/cloudflare-go/issues/1474)) + +DEPENDENCIES: + +- deps: bumps github.com/cloudflare/circl from 1.3.3 to 1.3.7 ([#1475](https://github.com/cloudflare/cloudflare-go/issues/1475)) +- deps: bumps golang.org/x/net from 0.19.0 to 0.20.0 ([#1476](https://github.com/cloudflare/cloudflare-go/issues/1476)) + +## 0.85.0 (January 3rd, 2024) + +DEPENDENCIES: + +- deps: bumps github.com/go-git/go-git/v5 from 5.4.2 to 5.11.0 ([#1470](https://github.com/cloudflare/cloudflare-go/issues/1470)) +- deps: bumps github.com/urfave/cli/v2 from 2.26.0 to 2.27.0 ([#1471](https://github.com/cloudflare/cloudflare-go/issues/1471)) +- deps: bumps github.com/urfave/cli/v2 from 2.27.0 to 2.27.1 ([#1472](https://github.com/cloudflare/cloudflare-go/issues/1472)) + +## 0.84.0 (December 20th, 2023) + +ENHANCEMENTS: + +- access_group: Add support for email lists ([#1445](https://github.com/cloudflare/cloudflare-go/issues/1445)) +- device_posture_rules: add support for Access client fields in device posture integrations ([#1464](https://github.com/cloudflare/cloudflare-go/issues/1464)) +- page_shield: added support for page shield ([#1459](https://github.com/cloudflare/cloudflare-go/issues/1459)) + +DEPENDENCIES: + +- deps: bumps actions/setup-go from 4 to 5 ([#1460](https://github.com/cloudflare/cloudflare-go/issues/1460)) +- deps: bumps github/codeql-action from 2 to 3 ([#1462](https://github.com/cloudflare/cloudflare-go/issues/1462)) +- deps: bumps golang.org/x/crypto from 0.14.0 to 0.17.0 ([#1466](https://github.com/cloudflare/cloudflare-go/issues/1466)) + +## 0.83.0 (December 6th, 2023) + +ENHANCEMENTS: + +- cloudflare: Add ResultInfo to RawResponse ([#1453](https://github.com/cloudflare/cloudflare-go/issues/1453)) +- devices_policy: add fields for Opt-In Split Tunnel Overlapping IPs feature. 
([#1454](https://github.com/cloudflare/cloudflare-go/issues/1454)) +- stream: Add ScheduledDeletion to StreamCreateVideoParameters ([#1457](https://github.com/cloudflare/cloudflare-go/issues/1457)) +- stream: Add ScheduledDeletion to StreamUploadFromURLParameters ([#1457](https://github.com/cloudflare/cloudflare-go/issues/1457)) +- stream: Add ScheduledDeletion to StreamVideo ([#1457](https://github.com/cloudflare/cloudflare-go/issues/1457)) +- stream: Add ScheduledDeletion to StreamVideoCreate ([#1457](https://github.com/cloudflare/cloudflare-go/issues/1457)) +- worker_bindings: Fixing form element name for d1 binding ([#1450](https://github.com/cloudflare/cloudflare-go/issues/1450)) +- worker_bindings: add support for `d1` bindings ([#1446](https://github.com/cloudflare/cloudflare-go/issues/1446)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.25.7 to 2.26.0 ([#1456](https://github.com/cloudflare/cloudflare-go/issues/1456)) +- deps: bumps golang.org/x/net from 0.18.0 to 0.19.0 ([#1452](https://github.com/cloudflare/cloudflare-go/issues/1452)) +- deps: bumps golang.org/x/time from 0.4.0 to 0.5.0 ([#1449](https://github.com/cloudflare/cloudflare-go/issues/1449)) + +## 0.82.0 (November 22nd, 2023) + +ENHANCEMENTS: + +- ip_access_rules: Add ListIPAccessRules() to list IP Access Rules ([#1428](https://github.com/cloudflare/cloudflare-go/issues/1428)) +- load_balancing: add healthy field to LoadBalancerPool ([#1442](https://github.com/cloudflare/cloudflare-go/issues/1442)) + +BUG FIXES: + +- load_balancing: Add support for virtual network id in origins ([#1441](https://github.com/cloudflare/cloudflare-go/issues/1441)) +- per_hostname_tls_setting: use `buildURI` for defining the query parameters when sorting ([#1440](https://github.com/cloudflare/cloudflare-go/issues/1440)) + +DEPENDENCIES: + +- deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.4 to 0.7.5 ([#1438](https://github.com/cloudflare/cloudflare-go/issues/1438)) +- deps: bumps golang.org/x/net from 0.17.0 to 0.18.0 ([#1439](https://github.com/cloudflare/cloudflare-go/issues/1439)) + +## 0.81.0 (November 8th, 2023) + +BREAKING CHANGES: + +- devices_policy: `CreateDeviceSettingsPolicy` is updated with method signatures matching the library conventions ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `DeleteDeviceSettingsPolicy` is updated with method signatures matching the library conventions ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `DeviceClientCertificates` is renamed to `DeviceClientCertificates` ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `GetDefaultDeviceSettingsPolicy` is updated with method signatures matching the library conventions ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `GetDeviceClientCertificatesZone` is renamed to `GetDeviceClientCertificates` with updated method signatures ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `GetDeviceClientCertificates` is updated with method signatures matching the library conventions ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `GetDeviceSettingsPolicy` is updated with method signatures matching the library conventions ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `UpdateDefaultDeviceSettingsPolicy` is updated with method signatures matching the library conventions 
([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `UpdateDeviceClientCertificatesZone` is renamed to `UpdateDeviceClientCertificates` with updated method signatures ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- devices_policy: `UpdateDeviceSettingsPolicy` is updated with method signatures matching the library conventions ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) + +ENHANCEMENTS: + +- access_seats: Add UpdateAccessUserSeat() to update an Access/Zero-Trust user's seat ([#1427](https://github.com/cloudflare/cloudflare-go/issues/1427)) +- access_user: Add GetAccessUserActiveSessions() to get all active sessions for an Access/Zero-Trust user. ([#1427](https://github.com/cloudflare/cloudflare-go/issues/1427)) +- access_user: Add GetAccessUserFailedLogins() to get all failed login attempts for an Access/Zero-Trust user. ([#1427](https://github.com/cloudflare/cloudflare-go/issues/1427)) +- access_user: Add GetAccessUserLastSeenIdentity() to get the last seen identity for an Access/Zero-Trust user. ([#1427](https://github.com/cloudflare/cloudflare-go/issues/1427)) +- access_user: Add GetAccessUserSingleActiveSession() to get an active session for an Access/Zero-Trust user. ([#1427](https://github.com/cloudflare/cloudflare-go/issues/1427)) +- access_user: Add ListAccessUsers() to get a list of users for an Access/Zero-Trust account. ([#1427](https://github.com/cloudflare/cloudflare-go/issues/1427)) +- devices_policy: Add support for listing device settings policies ([#1433](https://github.com/cloudflare/cloudflare-go/issues/1433)) +- teams_rules: Add support for resolver policies ([#1436](https://github.com/cloudflare/cloudflare-go/issues/1436)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/time from 0.3.0 to 0.4.0 ([#1434](https://github.com/cloudflare/cloudflare-go/issues/1434)) + +## 0.80.0 (October 25th, 2023) + +BREAKING CHANGES: + +- teams: `BrowserIsolation.UrlBrowserIsolationEnabled` has changed from `bool` to `*bool` to meet the library conventions ([#1424](https://github.com/cloudflare/cloudflare-go/issues/1424)) + +ENHANCEMENTS: + +- access_application: Add support for app launcher customization fields ([#1407](https://github.com/cloudflare/cloudflare-go/issues/1407)) +- api_shield_schema: Add support for Get/Update API Shield Operation Schema Validation Settings ([#1422](https://github.com/cloudflare/cloudflare-go/issues/1422)) +- api_shield_schema: Add support for Get/Update API Shield Schema Validation Settings ([#1418](https://github.com/cloudflare/cloudflare-go/issues/1418)) +- teams: Add support for body_scanning (Enhanced File Detection) in teams account configuration ([#1423](https://github.com/cloudflare/cloudflare-go/issues/1423)) +- load_balancing: extend documentation for least_connections steering policy ([#1414](https://github.com/cloudflare/cloudflare-go/issues/1414)) +- teams: Add `non_identity_enabled` boolean in browser isolation settings ([#1424](https://github.com/cloudflare/cloudflare-go/issues/1424)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/net from 0.7.0 to 0.17.0 ([#1421](https://github.com/cloudflare/cloudflare-go/issues/1421)) + +## 0.79.0 (October 11th, 2023) + +ENHANCEMENTS: + +- access_organization: Add support for session_duration ([#1415](https://github.com/cloudflare/cloudflare-go/issues/1415)) +- access_policy: Add support for session_duration ([#1415](https://github.com/cloudflare/cloudflare-go/issues/1415)) +- api_shield_discovery: Add support for Get/Patch API Shield API 
Discovery Operations ([#1413](https://github.com/cloudflare/cloudflare-go/issues/1413)) +- api_shield_schema: Add support for managing schemas for API Shield Schema Validation 2.0 ([#1406](https://github.com/cloudflare/cloudflare-go/issues/1406)) +- d1: adds support for d1 ([#1417](https://github.com/cloudflare/cloudflare-go/issues/1417)) +- teams: Add `audit_ssh_settings` endpoints ([#1419](https://github.com/cloudflare/cloudflare-go/issues/1419)) + +BUG FIXES: + +- custom_nameservers: change `NSSet` from string to int to match API response ([#1410](https://github.com/cloudflare/cloudflare-go/issues/1410)) +- observatory: fix double url encoding ([#1412](https://github.com/cloudflare/cloudflare-go/issues/1412)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/net from 0.15.0 to 0.16.0 ([#1416](https://github.com/cloudflare/cloudflare-go/issues/1416)) +- deps: bumps golang.org/x/net from 0.16.0 to 0.17.0 ([#1420](https://github.com/cloudflare/cloudflare-go/issues/1420)) + +## 0.78.0 (September 27th, 2023) + +BREAKING CHANGES: + +- account_role: `AccountRole` has been renamed to `GetAccountRole` to align with the updated method conventions ([#1405](https://github.com/cloudflare/cloudflare-go/issues/1405)) +- account_role: `AccountRoles` has been renamed to `ListAccountRoles` to align with the updated method conventions ([#1405](https://github.com/cloudflare/cloudflare-go/issues/1405)) + +ENHANCEMENTS: + +- access_application: Add support for tags ([#1403](https://github.com/cloudflare/cloudflare-go/issues/1403)) +- access_tag: Add support for tags ([#1403](https://github.com/cloudflare/cloudflare-go/issues/1403)) +- list_item: allow filtering by search term, cursor and per page attributes ([#1409](https://github.com/cloudflare/cloudflare-go/issues/1409)) +- observatory: add support for observatory API ([#1401](https://github.com/cloudflare/cloudflare-go/issues/1401)) + +BUG FIXES: + +- account_role: autopaginate all available results instead of a static number ([#1405](https://github.com/cloudflare/cloudflare-go/issues/1405)) +- semgrep: Improved IPv4 validation by implementing a new pattern to handle cases where non-IPv4 addresses were previously accepted. 
([#1382](https://github.com/cloudflare/cloudflare-go/issues/1382)) + +DEPENDENCIES: + +- deps: bumps codecov/codecov-action from 3 to 4 ([#1402](https://github.com/cloudflare/cloudflare-go/issues/1402)) + +## 0.77.0 (September 13th, 2023) + +ENHANCEMENTS: + +- access_identity_provider: add support for email_claim_name and authorization_server_id ([#1390](https://github.com/cloudflare/cloudflare-go/issues/1390)) +- access_identity_provider: add support for ping_env_id ([#1391](https://github.com/cloudflare/cloudflare-go/issues/1391)) +- dcv_delegation: add GET for DCV Delegation UUID ([#1384](https://github.com/cloudflare/cloudflare-go/issues/1384)) +- streams: adds support to initiate tus upload ([#1359](https://github.com/cloudflare/cloudflare-go/issues/1359)) +- tunnel: add support for `include_prefix`, `exclude_prefix` in list operations ([#1385](https://github.com/cloudflare/cloudflare-go/issues/1385)) + +BUG FIXES: + +- dns: keep comments when calling UpdateDNSRecord with zero values of UpdateDNSRecordParams ([#1393](https://github.com/cloudflare/cloudflare-go/issues/1393)) + +DEPENDENCIES: + +- deps: bumps actions/checkout from 3 to 4 ([#1387](https://github.com/cloudflare/cloudflare-go/issues/1387)) +- deps: bumps golang.org/x/net from 0.14.0 to 0.15.0 ([#1389](https://github.com/cloudflare/cloudflare-go/issues/1389)) +- deps: bumps goreleaser/goreleaser-action from 4.4.0 to 4.6.0 ([#1388](https://github.com/cloudflare/cloudflare-go/issues/1388)) +- deps: bumps goreleaser/goreleaser-action from 4.6.0 to 5.0.0 ([#1396](https://github.com/cloudflare/cloudflare-go/issues/1396)) + +## 0.76.0 (August 30th, 2023) + +BREAKING CHANGES: + +- images: Renamed Image struct "Metadata" field to "Meta" ([#1379](https://github.com/cloudflare/cloudflare-go/issues/1379)) + +ENHANCEMENTS: + +- access_application: added custom_non_identity_deny_url ([#1373](https://github.com/cloudflare/cloudflare-go/issues/1373)) +- load_balancer_monitor: add support for `consecutive_up`, `consecutive_down` ([#1380](https://github.com/cloudflare/cloudflare-go/issues/1380)) +- workers: Add support for retrieving and uploading only script content. ([#1361](https://github.com/cloudflare/cloudflare-go/issues/1361)) +- workers: Add support for retrieving and uploading only script metadata. 
([#1361](https://github.com/cloudflare/cloudflare-go/issues/1361)) +- workers: allow namespaced scripts to be used as Worker tail consumers ([#1377](https://github.com/cloudflare/cloudflare-go/issues/1377)) + +BUG FIXES: + +- access_application: Use autopaginate flag as expected ([#1372](https://github.com/cloudflare/cloudflare-go/issues/1372)) +- access_ca_certificate: Use autopaginate flag as expected ([#1372](https://github.com/cloudflare/cloudflare-go/issues/1372)) +- access_group: Use autopaginate flag as expected ([#1372](https://github.com/cloudflare/cloudflare-go/issues/1372)) +- access_mutual_tls_certifcate: Use autopaginate flag as expected ([#1372](https://github.com/cloudflare/cloudflare-go/issues/1372)) +- access_policy: Use autopaginate flag as expected ([#1372](https://github.com/cloudflare/cloudflare-go/issues/1372)) +- images: Fix issue parsing Image Details from API due to incorrect struct json field ([#1379](https://github.com/cloudflare/cloudflare-go/issues/1379)) +- pagination: Will look at `total_count` and `per_page` to calculate `total_pages` if `total_pages` is zero ([#1372](https://github.com/cloudflare/cloudflare-go/issues/1372)) + +## 0.75.0 (August 16th, 2023) + +BREAKING CHANGES: + +- cloudflare: `Raw` method now returns a RawResponse rather than the raw JSON `Result` message ([#1355](https://github.com/cloudflare/cloudflare-go/issues/1355)) +- rulesets: Rename `RulesetPhaseRateLimit` to `RulesetPhaseHTTPRatelimit`, to match the phase name ([#1367](https://github.com/cloudflare/cloudflare-go/issues/1367)) +- rulesets: Rename `RulesetPhaseSuperBotFightMode` to `RulesetPhaseHTTPRequestSBFM`, to match the phase name ([#1367](https://github.com/cloudflare/cloudflare-go/issues/1367)) + +NOTES: + +- rulesets: Remove non-existent `allow` action ([#1367](https://github.com/cloudflare/cloudflare-go/issues/1367)) +- rulesets: Remove non-existent `http_request_main` phase ([#1367](https://github.com/cloudflare/cloudflare-go/issues/1367)) +- rulesets: Remove non-public `http_response_headers_transform_managed` and `http_request_late_transform_managed` phases ([#1367](https://github.com/cloudflare/cloudflare-go/issues/1367)) + +ENHANCEMENTS: + +- access_group: add auth_context group ruletype ([#1344](https://github.com/cloudflare/cloudflare-go/issues/1344)) +- access_identity_provider: add attr conditional_access_enabled ([#1344](https://github.com/cloudflare/cloudflare-go/issues/1344)) +- access_identity_provider: add auth context list/put endpoint ([#1344](https://github.com/cloudflare/cloudflare-go/issues/1344)) +- access_service_token: add support for managing `Duration` ([#1347](https://github.com/cloudflare/cloudflare-go/issues/1347)) +- bot_management: add support for bot_management API ([#1363](https://github.com/cloudflare/cloudflare-go/issues/1363)) +- cloudflare: swap `encoding/json` for `github.com/goccy/go-json` ([#1360](https://github.com/cloudflare/cloudflare-go/issues/1360)) +- device_posture_rule: support eid_last_seen and risk_level and correct total_score for Tanium posture rule ([#1366](https://github.com/cloudflare/cloudflare-go/issues/1366)) +- per_hostname_tls_settings: add support for managing hostname level TLS settings ([#1356](https://github.com/cloudflare/cloudflare-go/issues/1356)) +- rulesets: Add the `ddos_mitigation` action ([#1367](https://github.com/cloudflare/cloudflare-go/issues/1367)) +- waiting_room: add support for `queueing_status_code` ([#1357](https://github.com/cloudflare/cloudflare-go/issues/1357)) +- web_analytics: add support 
for web_analytics API ([#1348](https://github.com/cloudflare/cloudflare-go/issues/1348)) +- workers: add support for tagging Worker scripts ([#1368](https://github.com/cloudflare/cloudflare-go/issues/1368)) +- zone_hold: add support for zone hold API ([#1365](https://github.com/cloudflare/cloudflare-go/issues/1365)) + +BUG FIXES: + +- cache_purge: don't escape HTML entity values in URLs for cache keys ([#1360](https://github.com/cloudflare/cloudflare-go/issues/1360)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/net from 0.12.0 to 0.13.0 ([#1353](https://github.com/cloudflare/cloudflare-go/issues/1353)) +- deps: bumps golang.org/x/net from 0.13.0 to 0.14.0 ([#1362](https://github.com/cloudflare/cloudflare-go/issues/1362)) +- deps: bumps goreleaser/goreleaser-action from 4.3.0 to 4.4.0 ([#1369](https://github.com/cloudflare/cloudflare-go/issues/1369)) + +## 0.74.0 (August 2nd, 2023) + +ENHANCEMENTS: + +- access_application: Add support for custom pages ([#1343](https://github.com/cloudflare/cloudflare-go/issues/1343)) +- access_custom_page: Add support for custom pages ([#1343](https://github.com/cloudflare/cloudflare-go/issues/1343)) +- access_organization: add support for custom pages ([#1343](https://github.com/cloudflare/cloudflare-go/issues/1343)) +- rulesets: Remove internal-only schema kind ([#1346](https://github.com/cloudflare/cloudflare-go/issues/1346)) +- rulesets: Remove some request parameters that are not allowed or have no effect ([#1346](https://github.com/cloudflare/cloudflare-go/issues/1346)) +- rulesets: Update API reference links ([#1346](https://github.com/cloudflare/cloudflare-go/issues/1346)) +- teams-accounts: Adds support for protocol detection ([#1340](https://github.com/cloudflare/cloudflare-go/issues/1340)) +- workers: Add `pipeline_hash` field to Workers script response struct. ([#1330](https://github.com/cloudflare/cloudflare-go/issues/1330)) +- workers: Add support for declaring arbitrary bindings with UnsafeBinding. ([#1330](https://github.com/cloudflare/cloudflare-go/issues/1330)) +- workers: Add support for uploading scripts to a Workers for Platforms namespace. ([#1330](https://github.com/cloudflare/cloudflare-go/issues/1330)) +- workers: Add support for uploading workers with Workers for Platforms namespace bindings. 
([#1330](https://github.com/cloudflare/cloudflare-go/issues/1330)) + +BUG FIXES: + +- flarectl: allow for create or update to actually create the record ([#1341](https://github.com/cloudflare/cloudflare-go/issues/1341)) +- load_balancing: Fix pool creation with MinimumOrigins set to 0 ([#1338](https://github.com/cloudflare/cloudflare-go/issues/1338)) +- workers: Fix namespace dispatch upload API path ([#1345](https://github.com/cloudflare/cloudflare-go/issues/1345)) + +## 0.73.0 (July 19th, 2023) + +BREAKING CHANGES: + +- pages_deployment: add support for auto pagination ([#1264](https://github.com/cloudflare/cloudflare-go/issues/1264)) +- pages_deployment: change DeletePagesDeploymentParams to contain all parameters ([#1264](https://github.com/cloudflare/cloudflare-go/issues/1264)) +- pages_project: change to use ResourceContainer for account ID ([#1264](https://github.com/cloudflare/cloudflare-go/issues/1264)) +- pages_project: rename PagesProject to GetPagesProject ([#1264](https://github.com/cloudflare/cloudflare-go/issues/1264)) +- rulesets: `CreateAccountRuleset` is removed in favour of `CreateRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `CreateZoneRuleset` is removed in favour of `CreateRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `DeleteAccountRuleset` is removed in favour of `DeleteRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `DeleteZoneRuleset` is removed in favour of `DeleteRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `GetAccountRulesetPhase` is removed in favour of `GetEntrypointRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `GetAccountRuleset` is removed in favour of `GetRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `GetZoneRulesetPhase` is removed in favour of `GetEntrypointRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `GetZoneRuleset` is removed in favour of `GetRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `UpdateAccountRulesetPhase` is removed in favour of `UpdateEntrypointRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `UpdateAccountRuleset` is removed in favour of `UpdateRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `UpdateZoneRulesetPhase` is removed in favour of `UpdateEntrypointRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) +- rulesets: `UpdateZoneRuleset` is removed in favour of `UpdateRuleset` ([#1333](https://github.com/cloudflare/cloudflare-go/issues/1333)) + +ENHANCEMENTS: + +- device_posture_rule: support active_threats, network_status, infected, and is_active for sentinelone_s2s posture rule ([#1339](https://github.com/cloudflare/cloudflare-go/issues/1339)) +- device_posture_rule: support certificate_id and cn for client_certificate posture rule ([#1339](https://github.com/cloudflare/cloudflare-go/issues/1339)) +- images: adds ability to upload image by url ([#1335](https://github.com/cloudflare/cloudflare-go/issues/1335)) +- load_balancing: support header session affinity policy ([#1302](https://github.com/cloudflare/cloudflare-go/issues/1302)) +- zone: Added `GetRegionalTieredCache` and `UpdateRegionalTieredCache` to allow setting Regional Tiered Cache for a zone. 
([#1336](https://github.com/cloudflare/cloudflare-go/issues/1336)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/net from 0.11.0 to 0.12.0 ([#1328](https://github.com/cloudflare/cloudflare-go/issues/1328)) + +## 0.72.0 (July 5th, 2023) + +BREAKING CHANGES: + +- logpush: `CheckAccountLogpushDestinationExists` is removed in favour of `CheckLogpushDestinationExists` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `CheckZoneLogpushDestinationExists` is removed in favour of `CheckLogpushDestinationExists` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `CreateAccountLogpushJob` is removed in favour of `CreateLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `CreateZoneLogpushJob` is removed in favour of `CreateLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `DeleteAccountLogpushJob` is removed in favour of `DeleteLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `DeleteZoneLogpushJob` is removed in favour of `DeleteLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `GetAccountLogpushFields` is removed in favour of `GetLogpushFields` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `GetAccountLogpushJob` is removed in favour of `GetLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `GetAccountLogpushOwnershipChallenge` is removed in favour of `GetLogpushOwnershipChallenge` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `GetZoneLogpushFields` is removed in favour of `GetLogpushFields` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `GetZoneLogpushJob` is removed in favour of `GetLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `GetZoneLogpushOwnershipChallenge` is removed in favour of `GetLogpushOwnershipChallenge` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `ListAccountLogpushJobsForDataset` is removed in favour of `ListLogpushJobsForDataset` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `ListAccountLogpushJobs` is removed in favour of `ListLogpushJobs` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `ListZoneLogpushJobsForDataset` is removed in favour of `ListLogpushJobsForDataset` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `ListZoneLogpushJobs` is removed in favour of `ListLogpushJobs` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `UpdateAccountLogpushJob` is removed in favour of `UpdateLogpushJob` with `ResourceContainer` method parameter 
([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `UpdateZoneLogpushJob` is removed in favour of `UpdateLogpushJob` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `ValidateAccountLogpushOwnershipChallenge` is removed in favour of `ValidateLogpushOwnershipChallenge` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: `ValidateZoneLogpushOwnershipChallenge` is removed in favour of `ValidateLogpushOwnershipChallenge` with `ResourceContainer` method parameter ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) +- logpush: all methods are updated to use the newer client conventions for method signatures ([#1326](https://github.com/cloudflare/cloudflare-go/issues/1326)) + +ENHANCEMENTS: + +- resource_container: expose `Type` on `*ResourceContainer` to explicitly denote what type of resource it is instead of inferring from `Level`. ([#1325](https://github.com/cloudflare/cloudflare-go/issues/1325)) + +## 0.71.0 (July 5th, 2023) + +BREAKING CHANGES: + +- access_application: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_ca_certificate: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_group: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_identity_provider: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_mutual_tls_certificates: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_organization: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_policy: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_service_tokens: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_user_token: refactor methods to use `ResourceContainer` instead of dedicated account/zone methods ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- images: renamed `BaseImage` to `GetBaseImage` to match library conventions ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- images: renamed `ImageDetails` to `GetImage` to match library conventions ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- images: renamed `ImagesStats` to `GetImagesStats` to match library conventions ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- images: updated method signatures of `DeleteImage` to match newer conventions and standards ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- images: updated method signatures of `ListImages` to match newer conventions and standards ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- images: updated method signatures of 
`UpdateImage` to match newer conventions and standards ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- images: updated method signatures of `UploadImage` to match newer conventions and standards ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) + +ENHANCEMENTS: + +- access_application: add support for auto pagination ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_ca_certificate: add support for auto pagination ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_group: add support for auto pagination ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_identity_provider: add support for auto pagination ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_mutual_tls_certificates: add support for auto pagination ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- access_policy: add support for auto pagination ([#1319](https://github.com/cloudflare/cloudflare-go/issues/1319)) +- device_posture_rule: support os_version_extra ([#1316](https://github.com/cloudflare/cloudflare-go/issues/1316)) +- images: adds support for v2 when uploading images directly ([#1322](https://github.com/cloudflare/cloudflare-go/issues/1322)) +- workers: Add ability to specify tail Workers in script metadata ([#1317](https://github.com/cloudflare/cloudflare-go/issues/1317)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.5.1 to 1.6.0 ([#1320](https://github.com/cloudflare/cloudflare-go/issues/1320)) + +## 0.70.0 (June 21st, 2023) + +BREAKING CHANGES: + +- cloudflare: remove `UsingAccount` in favour of resource-specific attributes; see the sketch after this section ([#1315](https://github.com/cloudflare/cloudflare-go/issues/1315)) +- cloudflare: remove `api.AccountID` from client struct ([#1315](https://github.com/cloudflare/cloudflare-go/issues/1315)) +- dns_firewall: modernise method signatures and conventions to align with the experimental client ([#1313](https://github.com/cloudflare/cloudflare-go/issues/1313)) +- railgun: remove support for railgun ([#1312](https://github.com/cloudflare/cloudflare-go/issues/1312)) +- tunnel: swap `ConnectTimeout`, `TLSTimeout`, `TCPKeepAlive` and `KeepAliveTimeout` to `TunnelDuration` instead of `time.Duration` ([#1303](https://github.com/cloudflare/cloudflare-go/issues/1303)) +- virtualdns: remove support in favour of newer DNS firewall methods ([#1313](https://github.com/cloudflare/cloudflare-go/issues/1313)) + +ENHANCEMENTS: + +- custom_nameservers: add support for managing custom nameservers ([#1304](https://github.com/cloudflare/cloudflare-go/issues/1304)) +- load_balancing: extend documentation for least_outstanding_requests steering policy ([#1293](https://github.com/cloudflare/cloudflare-go/issues/1293)) +- waiting_room: add support for `additional_routes` and `cookie_suffix` ([#1311](https://github.com/cloudflare/cloudflare-go/issues/1311)) + +DEPENDENCIES: + +- deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.3 to 0.7.4 ([#1301](https://github.com/cloudflare/cloudflare-go/issues/1301)) +- deps: bumps github.com/urfave/cli/v2 from 2.25.5 to 2.25.6 ([#1305](https://github.com/cloudflare/cloudflare-go/issues/1305)) +- deps: bumps github.com/urfave/cli/v2 from 2.25.6 to 2.25.7 ([#1314](https://github.com/cloudflare/cloudflare-go/issues/1314)) +- deps: bumps golang.org/x/net from 0.10.0 to 0.11.0 ([#1307](https://github.com/cloudflare/cloudflare-go/issues/1307)) +- deps: bumps goreleaser/goreleaser-action from 4.2.0 to 4.3.0 ([#1306](https://github.com/cloudflare/cloudflare-go/issues/1306))
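To make the v0.70.0 change above concrete (per-call resource containers replacing the client-wide `UsingAccount` / `api.AccountID` scoping), here is a minimal sketch. It is not taken from the upstream repository: the environment variable names are placeholders, and the commented `ListLogpushJobs` call is only an assumed example of the calling pattern.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/cloudflare/cloudflare-go"
)

func main() {
	// Build the client once; account/zone scoping no longer lives on the client.
	api, err := cloudflare.NewWithAPIToken(os.Getenv("CLOUDFLARE_API_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}

	// Each call now receives an explicit *ResourceContainer built from the
	// account or zone it should target.
	account := cloudflare.AccountIdentifier(os.Getenv("CLOUDFLARE_ACCOUNT_ID"))
	zone := cloudflare.ZoneIdentifier(os.Getenv("CLOUDFLARE_ZONE_ID"))

	// Assumed example of the pattern (method and params shown for illustration only):
	//   jobs, err := api.ListLogpushJobs(ctx, account, cloudflare.ListLogpushJobsParams{})
	fmt.Printf("client: %T\naccount container: %+v\nzone container: %+v\n", api, account, zone)
}
```

The same container-first pattern recurs in the v0.57.0 workers changes further down this changelog.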
+## 0.69.0 (June 7th, 2023) + +BREAKING CHANGES: + +- stream: StreamVideo.Duration has changed from int to float64. ([#1190](https://github.com/cloudflare/cloudflare-go/issues/1190)) + +ENHANCEMENTS: + +- access: Added `self_hosted_domains` support to access applications ([#1281](https://github.com/cloudflare/cloudflare-go/issues/1281)) +- custom_hostname: add support for `bundle_method` TLS configuration ([#1298](https://github.com/cloudflare/cloudflare-go/issues/1298)) +- devices_policy: Add missing description field to policy ([#1294](https://github.com/cloudflare/cloudflare-go/issues/1294)) +- stream: added metadata support ([#1088](https://github.com/cloudflare/cloudflare-go/issues/1088)) + +BUG FIXES: + +- email_routing_destination: return the encountered error instead of always returning `ErrMissingAccountID` ([#1297](https://github.com/cloudflare/cloudflare-go/issues/1297)) +- stream: Fix a bug where the video duration number could not be unmarshalled. ([#1190](https://github.com/cloudflare/cloudflare-go/issues/1190)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.5.0 to 1.5.1 ([#1292](https://github.com/cloudflare/cloudflare-go/issues/1292)) +- deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.2 to 0.7.3 ([#1300](https://github.com/cloudflare/cloudflare-go/issues/1300)) +- deps: bumps github.com/stretchr/testify from 1.8.3 to 1.8.4 ([#1296](https://github.com/cloudflare/cloudflare-go/issues/1296)) +- deps: bumps github.com/urfave/cli/v2 from 2.25.3 to 2.25.5 ([#1295](https://github.com/cloudflare/cloudflare-go/issues/1295)) + +## 0.68.0 (May 24th, 2023) + +BREAKING CHANGES: + +- r2_bucket: change creation time from string to \*time.Time ([#1265](https://github.com/cloudflare/cloudflare-go/issues/1265)) + +ENHANCEMENTS: + +- adds OriginRequest field to UnvalidatedIngressRule struct. ([#1138](https://github.com/cloudflare/cloudflare-go/issues/1138)) +- lists: add support for hostname and ASN lists. ([#1288](https://github.com/cloudflare/cloudflare-go/issues/1288)) +- pages: add support for Smart Placement. Added `Placement` in `PagesProjectDeploymentConfigEnvironment`. ([#1279](https://github.com/cloudflare/cloudflare-go/issues/1279)) +- r2_bucket: add support for getting a bucket ([#1265](https://github.com/cloudflare/cloudflare-go/issues/1265)) +- tunnels: add support for `access` and `http2Origin` keys ([#1291](https://github.com/cloudflare/cloudflare-go/issues/1291)) +- workers: add support for Smart Placement. Added `Placement` in `CreateWorkerParams`. ([#1279](https://github.com/cloudflare/cloudflare-go/issues/1279)) +- zone: Added `GetCacheReserve` and `UpdateCacheReserve` to allow setting Cache Reserve for a zone. 
([#1278](https://github.com/cloudflare/cloudflare-go/issues/1278)) + +BUG FIXES: + +- dns: fix MX record priority not set by UpdateDNSRecord ([#1290](https://github.com/cloudflare/cloudflare-go/issues/1290)) +- flarectl/dns: ensure MX priority value is dereferenced ([#1289](https://github.com/cloudflare/cloudflare-go/issues/1289)) +- turnstile: remove `SiteKey` being sent in rotate secret's request body ([#1285](https://github.com/cloudflare/cloudflare-go/issues/1285)) +- turnstile: remove `SiteKey`/`Secret` being sent in update request body ([#1284](https://github.com/cloudflare/cloudflare-go/issues/1284)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.4.0 to 1.5.0 ([#1287](https://github.com/cloudflare/cloudflare-go/issues/1287)) +- deps: bumps github.com/stretchr/testify from 1.8.2 to 1.8.3 ([#1286](https://github.com/cloudflare/cloudflare-go/issues/1286)) + +## 0.67.0 (May 10th, 2023) + +NOTES: + +- dns_firewall: The `OriginIPs` field has been renamed to `UpstreamIPs`. ([#1246](https://github.com/cloudflare/cloudflare-go/issues/1246)) + +ENHANCEMENTS: + +- device_posture_rule: add input fields tanium, intune and kolide ([#1268](https://github.com/cloudflare/cloudflare-go/issues/1268)) +- waiting_room: add support for zone-level settings ([#1276](https://github.com/cloudflare/cloudflare-go/issues/1276)) + +BUG FIXES: + +- rulesets: allow `PreserveQueryString` to be nullable ([#1275](https://github.com/cloudflare/cloudflare-go/issues/1275)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.25.1 to 2.25.3 ([#1274](https://github.com/cloudflare/cloudflare-go/issues/1274)) +- deps: bumps golang.org/x/net from 0.9.0 to 0.10.0 ([#1280](https://github.com/cloudflare/cloudflare-go/issues/1280)) + +## 0.66.0 (26th April, 2023) + +ENHANCEMENTS: + +- access_application: Add `path_cookie_attribute` app setting ([#1223](https://github.com/cloudflare/cloudflare-go/issues/1223)) +- certificate_packs: add `Status` field to indicate the status of certificate pack ([#1271](https://github.com/cloudflare/cloudflare-go/issues/1271)) +- data localization: add support for regional hostnames API ([#1270](https://github.com/cloudflare/cloudflare-go/issues/1270)) +- dns: add support for importing and exporting DNS records using BIND file configurations ([#1266](https://github.com/cloudflare/cloudflare-go/issues/1266)) +- logpush: add support for max upload parameters ([#1272](https://github.com/cloudflare/cloudflare-go/issues/1272)) +- turnstile: add support for turnstile ([#1267](https://github.com/cloudflare/cloudflare-go/issues/1267)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.3.6 to 1.4.0 ([#1269](https://github.com/cloudflare/cloudflare-go/issues/1269)) + +## 0.65.0 (12th April, 2023) + +ENHANCEMENTS: + +- access: Add `auto_redirect_to_identity` flag to Access organizations ([#1260](https://github.com/cloudflare/cloudflare-go/issues/1260)) +- access: Add `isolation_required` flag to Access policies ([#1258](https://github.com/cloudflare/cloudflare-go/issues/1258)) +- rulesets: add support for add operation to HTTP header configuration ([#1253](https://github.com/cloudflare/cloudflare-go/issues/1253)) +- rulesets: add support for the `compress_response` action ([#1261](https://github.com/cloudflare/cloudflare-go/issues/1261)) +- rulesets: add support for the `http_response_compression` phase ([#1261](https://github.com/cloudflare/cloudflare-go/issues/1261)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/net from 0.8.0 to 0.9.0 
([#1263](https://github.com/cloudflare/cloudflare-go/issues/1263)) + +## 0.64.0 (29th March, 2023) + +BREAKING CHANGES: + +- dns: Changed Create/UpdateDNSRecord method signatures to return (DNSRecord, error) ([#1243](https://github.com/cloudflare/cloudflare-go/issues/1243)) +- zone: `UpdateZoneSingleSetting` has been renamed to `UpdateZoneSetting` and updated method signature inline with our expected conventions ([#1251](https://github.com/cloudflare/cloudflare-go/issues/1251)) +- zone: `ZoneSingleSetting` has been renamed to `GetZoneSetting` and updated method signature inline with our expected conventions ([#1251](https://github.com/cloudflare/cloudflare-go/issues/1251)) + +ENHANCEMENTS: + +- access_identity_provider: add `claims` and `scopes` fields ([#1237](https://github.com/cloudflare/cloudflare-go/issues/1237)) +- access_identity_provider: add scim_config field ([#1178](https://github.com/cloudflare/cloudflare-go/issues/1178)) +- devices_policy: update `Mode` field to use new `ServiceMode` string type with explicit const service mode values ([#1249](https://github.com/cloudflare/cloudflare-go/issues/1249)) +- ssl: make `GeoRestrictions` a pointer inside of ZoneCustomSSL ([#1244](https://github.com/cloudflare/cloudflare-go/issues/1244)) +- zone: `GetZoneSetting` and `UpdateZoneSetting` now allow configuring the path for where a setting resides instead of assuming `settings` ([#1251](https://github.com/cloudflare/cloudflare-go/issues/1251)) + +BUG FIXES: + +- teams_rules: `AllowChildBypass` changes from a `bool` to `*bool` ([#1242](https://github.com/cloudflare/cloudflare-go/issues/1242)) +- teams_rules: `BypassParentRule` changes from a `bool` to `*bool` ([#1242](https://github.com/cloudflare/cloudflare-go/issues/1242)) +- tunnel: Fix 'CreateTunnel' for tunnels using config_src ([#1238](https://github.com/cloudflare/cloudflare-go/issues/1238)) + +DEPENDENCIES: + +- deps: bumps actions/setup-go from 3 to 4 ([#1236](https://github.com/cloudflare/cloudflare-go/issues/1236)) +- deps: bumps github.com/urfave/cli/v2 from 2.25.0 to 2.25.1 ([#1250](https://github.com/cloudflare/cloudflare-go/issues/1250)) + +## 0.63.0 (15th March, 2023) + +BREAKING CHANGES: + +- tunnel: renamed `Tunnel` to `GetTunnel` ([#1227](https://github.com/cloudflare/cloudflare-go/issues/1227)) +- tunnel: renamed `Tunnels` to `ListTunnels` ([#1227](https://github.com/cloudflare/cloudflare-go/issues/1227)) + +ENHANCEMENTS: + +- access_organization: add ui_read_only_toggle_reason field ([#1181](https://github.com/cloudflare/cloudflare-go/issues/1181)) +- added audit_ssh to gateway actions, updated gateway rule settings ([#1226](https://github.com/cloudflare/cloudflare-go/issues/1226)) +- addressing: Add `Address Map` support ([#1232](https://github.com/cloudflare/cloudflare-go/issues/1232)) +- teams_account: add support for `check_disks` ([#1197](https://github.com/cloudflare/cloudflare-go/issues/1197)) +- tunnel: updated parameters to latest API docs ([#1227](https://github.com/cloudflare/cloudflare-go/issues/1227)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.24.4 to 2.25.0 ([#1229](https://github.com/cloudflare/cloudflare-go/issues/1229)) +- deps: bumps golang.org/x/net from 0.7.0 to 0.8.0 ([#1228](https://github.com/cloudflare/cloudflare-go/issues/1228)) + +## 0.62.0 (1st March, 2023) + +ENHANCEMENTS: + +- dex_test: add CRUD functionality for DEX test configurations ([#1209](https://github.com/cloudflare/cloudflare-go/issues/1209)) +- dlp: Adds support for partial payload logging 
([#1212](https://github.com/cloudflare/cloudflare-go/issues/1212)) +- teams_accounts: Add new root_certificate_installation_enabled field ([#1208](https://github.com/cloudflare/cloudflare-go/issues/1208)) +- teams_rules: Add `untrusted_cert` rule setting ([#1214](https://github.com/cloudflare/cloudflare-go/issues/1214)) +- tunnels: automatically paginate `ListTunnels` ([#1206](https://github.com/cloudflare/cloudflare-go/issues/1206)) + +BUG FIXES: + +- dex_test: use dex test types and json struct mappings instead of managed networks ([#1213](https://github.com/cloudflare/cloudflare-go/issues/1213)) +- dns: dont reuse DNSListResponse when using pagination to avoid Proxied pointer overwrite ([#1222](https://github.com/cloudflare/cloudflare-go/issues/1222)) + +DEPENDENCIES: + +- deps: bumps github.com/stretchr/testify from 1.8.1 to 1.8.2 ([#1220](https://github.com/cloudflare/cloudflare-go/issues/1220)) +- deps: bumps github.com/urfave/cli/v2 from 2.24.3 to 2.24.4 ([#1210](https://github.com/cloudflare/cloudflare-go/issues/1210)) +- deps: bumps golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0 ([#1218](https://github.com/cloudflare/cloudflare-go/issues/1218)) +- deps: bumps golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0 ([#1219](https://github.com/cloudflare/cloudflare-go/issues/1219)) +- deps: bumps golang.org/x/text from 0.3.7 to 0.3.8 ([#1215](https://github.com/cloudflare/cloudflare-go/issues/1215)) +- deps: bumps golang.org/x/text from 0.3.7 to 0.3.8 ([#1216](https://github.com/cloudflare/cloudflare-go/issues/1216)) +- deps: bumps golang.org/x/time from 0.0.0-20220224211638-0e9765cccd65 to 0.3.0 ([#1217](https://github.com/cloudflare/cloudflare-go/issues/1217)) + +## 0.61.0 (15th February, 2023) + +ENHANCEMENTS: + +- cloudflare: make it clearer when we hit a server error and to retry later ([#1207](https://github.com/cloudflare/cloudflare-go/issues/1207)) +- devices_policy: Add new exclude_office_ips field to policy ([#1205](https://github.com/cloudflare/cloudflare-go/issues/1205)) +- dlp_profile: Use int rather than uint for allowed_match_count field ([#1200](https://github.com/cloudflare/cloudflare-go/issues/1200)) + +BUG FIXES: + +- dns: always send `tags` to allow clearing ([#1196](https://github.com/cloudflare/cloudflare-go/issues/1196)) +- stream: renamed `RequiredSignedURLs` to `RequireSignedURLs` ([#1202](https://github.com/cloudflare/cloudflare-go/issues/1202)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.24.2 to 2.24.3 ([#1199](https://github.com/cloudflare/cloudflare-go/issues/1199)) + +## 0.60.0 (1st February, 2023) + +BREAKING CHANGES: + +- queues: UpdateQueue has been updated to match the API and now correctly updates a Queue's name ([#1188](https://github.com/cloudflare/cloudflare-go/issues/1188)) + +ENHANCEMENTS: + +- dlp_profile: Add new allowed_match_count field to profiles ([#1193](https://github.com/cloudflare/cloudflare-go/issues/1193)) +- dns: allow sending empty strings to remove comments ([#1195](https://github.com/cloudflare/cloudflare-go/issues/1195)) +- magic_transit_ipsec_tunnel: makes customer endpoint an optional field for ipsec tunnel creation ([#1185](https://github.com/cloudflare/cloudflare-go/issues/1185)) +- rulesets: add support for `score_per_period` and `score_response_header_name` ([#1183](https://github.com/cloudflare/cloudflare-go/issues/1183)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.3.5 to 1.3.6 ([#1184](https://github.com/cloudflare/cloudflare-go/issues/1184)) +- 
deps: bumps github.com/urfave/cli/v2 from 2.23.7 to 2.24.1 ([#1180](https://github.com/cloudflare/cloudflare-go/issues/1180)) +- deps: bumps github.com/urfave/cli/v2 from 2.24.1 to 2.24.2 ([#1191](https://github.com/cloudflare/cloudflare-go/issues/1191)) +- deps: bumps goreleaser/goreleaser-action from 4.1.0 to 4.2.0 ([#1192](https://github.com/cloudflare/cloudflare-go/issues/1192)) + +## 0.59.0 (January 18th, 2023) + +BREAKING CHANGES: + +- dns: remove these read-only fields from `UpdateDNSRecordParams`: `CreatedOn`, `ModifiedOn`, `Meta`, `ZoneID`, `ZoneName`, `Proxiable`, and `Locked` ([#1170](https://github.com/cloudflare/cloudflare-go/issues/1170)) +- dns: the fields `CreatedOn` and `ModifiedOn` are removed from `ListDNSRecordsParams` ([#1173](https://github.com/cloudflare/cloudflare-go/issues/1173)) + +NOTES: + +- dns: remove additional lookup from `Update` operations when `Name` or `Type` was omitted ([#1170](https://github.com/cloudflare/cloudflare-go/issues/1170)) + +ENHANCEMENTS: + +- access_organization: add user_seat_expiration_inactive_time field ([#1159](https://github.com/cloudflare/cloudflare-go/issues/1159)) +- dns: `GetDNSRecord`, `UpdateDNSRecord` and `DeleteDNSRecord` now return the new, dedicated error `ErrMissingDNSRecordID` when an empty DNS record ID is given. ([#1174](https://github.com/cloudflare/cloudflare-go/issues/1174)) +- dns: the URL parameter `tag-match` for listing DNS records is now supported as the field `TagMatch` in `ListDNSRecordsParams` ([#1173](https://github.com/cloudflare/cloudflare-go/issues/1173)) +- dns: update default `per_page` attribute to 100 records ([#1171](https://github.com/cloudflare/cloudflare-go/issues/1171)) +- teams_rules: adds support for Egress Policies ([#1142](https://github.com/cloudflare/cloudflare-go/issues/1142)) +- workers: Add support for compatibility_date and compatibility_flags when uploading a worker script ([#1177](https://github.com/cloudflare/cloudflare-go/issues/1177)) +- workers: script upload now supports Queues bindings ([#1176](https://github.com/cloudflare/cloudflare-go/issues/1176)) + +BUG FIXES: + +- dns: don't send "priority" for list operations as it isn't supported and is only used for internal filtering ([#1167](https://github.com/cloudflare/cloudflare-go/issues/1167)) +- dns: the field `Tags` in `ListDNSRecordsParams` was not correctly serialized into URL queries ([#1173](https://github.com/cloudflare/cloudflare-go/issues/1173)) +- managednetworks: Update should be PUT ([#1172](https://github.com/cloudflare/cloudflare-go/issues/1172)) + +## 0.58.1 (January 5th, 2023) + +ENHANCEMENTS: + +- cloudflare: automatically redact sensitive values from HTTP interactions ([#1164](https://github.com/cloudflare/cloudflare-go/issues/1164)) + +## 0.58.0 (January 4th, 2023) + +BREAKING CHANGES: + +- dns: `DNSRecord` has been renamed to `GetDNSRecord` ([#1151](https://github.com/cloudflare/cloudflare-go/issues/1151)) +- dns: `DNSRecords` has been renamed to `ListDNSRecords` ([#1151](https://github.com/cloudflare/cloudflare-go/issues/1151)) +- dns: method signatures have been updated to align with the upcoming client conventions (see the sketch after this list) ([#1151](https://github.com/cloudflare/cloudflare-go/issues/1151)) +- origin_ca: renamed `CreateOriginCertificate` to `CreateOriginCACertificate` ([#1161](https://github.com/cloudflare/cloudflare-go/issues/1161)) +- origin_ca: renamed `OriginCARootCertificate` to `GetOriginCARootCertificate` ([#1161](https://github.com/cloudflare/cloudflare-go/issues/1161)) +- origin_ca: renamed `OriginCertificate` to `GetOriginCACertificate` ([#1161](https://github.com/cloudflare/cloudflare-go/issues/1161)) +- origin_ca: renamed `OriginCertificates` to `ListOriginCACertificates` ([#1161](https://github.com/cloudflare/cloudflare-go/issues/1161)) +- origin_ca: renamed `RevokeOriginCertificate` to `RevokeOriginCACertificate` ([#1161](https://github.com/cloudflare/cloudflare-go/issues/1161))
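A minimal sketch of the renamed DNS methods in the list above (not taken from the upstream repository; it assumes a v0.58.0+ client, and the API token, zone ID and record ID values are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken(os.Getenv("CLOUDFLARE_API_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	zone := cloudflare.ZoneIdentifier(os.Getenv("CLOUDFLARE_ZONE_ID"))

	// DNSRecords -> ListDNSRecords: listing takes a zone container plus a params struct.
	records, _, err := api.ListDNSRecords(ctx, zone, cloudflare.ListDNSRecordsParams{Type: "TXT"})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range records {
		fmt.Println(r.Name, r.Content)
	}

	// DNSRecord -> GetDNSRecord: fetching a single record by its ID.
	record, err := api.GetDNSRecord(ctx, zone, "REPLACE_WITH_RECORD_ID")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Type, record.Name)
}
```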
+ENHANCEMENTS: + +- dns: add support for tags and comments ([#1151](https://github.com/cloudflare/cloudflare-go/issues/1151)) +- mtls_certificate: add support for managing mTLS certificates and associations ([#1150](https://github.com/cloudflare/cloudflare-go/issues/1150)) +- origin_ca: add support for using API keys, API tokens or API User service keys for interacting with Origin CA endpoints ([#1161](https://github.com/cloudflare/cloudflare-go/issues/1161)) +- workers: Add support for workers logpush enablement on script upload ([#1160](https://github.com/cloudflare/cloudflare-go/issues/1160)) + +BUG FIXES: + +- email_routing_destination: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- email_routing_rules: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- filter: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- firewall_rules: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- lockdown: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- queue: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- teams_list: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) +- workers_kv: use empty response struct on each page call ([#1156](https://github.com/cloudflare/cloudflare-go/issues/1156)) + +DEPENDENCIES: + +- deps: bumps github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 ([#1162](https://github.com/cloudflare/cloudflare-go/issues/1162)) + +## 0.57.1 (December 23rd, 2022) + +ENHANCEMENTS: + +- tiered_cache: Add support for Tiered Caching interactions for setting Smart and Generic topologies ([#1149](https://github.com/cloudflare/cloudflare-go/issues/1149)) + +BUG FIXES: + +- workers: correctly set `body` value for non-ES module uploads ([#1155](https://github.com/cloudflare/cloudflare-go/issues/1155)) + +## 0.57.0 (December 22nd, 2022) + +BREAKING CHANGES: + +- workers: API operations now target account-level resources instead of the older zone-level resources (these are a 1:1 now) ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers: method signatures have been updated to align with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers_bindings: method signatures have been updated to align with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers_cron_triggers: method signatures have been updated to align with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers_kv: method signatures have been updated to align with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers_routes: method signatures have been updated to align 
with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers_secrets: method signatures have been updated to align with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers_tails: method signatures have been updated to align with the upcoming client conventions ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) + +NOTES: + +- workers: all worker methods have been split into product ownership(-ish) files ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) +- workers: all worker methods now require an explicit `ResourceContainer` for endpoints instead of relying on the globally defined `api.AccountID` ([#1137](https://github.com/cloudflare/cloudflare-go/issues/1137)) + +ENHANCEMENTS: + +- managed_networks: add CRUD functionality for managednetworks ([#1148](https://github.com/cloudflare/cloudflare-go/issues/1148)) + +DEPENDENCIES: + +- deps: bumps goreleaser/goreleaser-action from 3.2.0 to 4.1.0 ([#1146](https://github.com/cloudflare/cloudflare-go/issues/1146)) + +## 0.56.0 (December 5th, 2022) + +BREAKING CHANGES: + +- pages: Changed the type of EnvVars in PagesProjectDeploymentConfigEnvironment & PagesProjectDeployment in order to properly support secrets. ([#1136](https://github.com/cloudflare/cloudflare-go/issues/1136)) + +NOTES: + +- pages: removed the v1 logs endpoint for Pages deployments. Please switch to v2: https://developers.cloudflare.com/api/operations/pages-deployment-get-deployment-logs ([#1135](https://github.com/cloudflare/cloudflare-go/issues/1135)) + +ENHANCEMENTS: + +- cache_rules: add ignore option to query string struct ([#1140](https://github.com/cloudflare/cloudflare-go/issues/1140)) +- pages: Updates bindings and other Functions-related properties. Service bindings, secrets, fail open/close and usage model are all now supported. ([#1136](https://github.com/cloudflare/cloudflare-go/issues/1136)) +- workers: Support for Workers Analytics Engine bindings ([#1133](https://github.com/cloudflare/cloudflare-go/issues/1133)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.23.5 to 2.23.6 ([#1139](https://github.com/cloudflare/cloudflare-go/issues/1139)) + +## 0.55.0 (November 23rd, 2022) + +BREAKING CHANGES: + +- workers_kv: `CreateWorkersKVNamespace` has been updated to match the experimental client method signatures (https://github.com/cloudflare/cloudflare-go/blob/master/docs/experimental.md). ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `DeleteWorkersKVBulk` has been renamed to `DeleteWorkersKVEntries`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `DeleteWorkersKVNamespace` has been updated to match the experimental client method signatures (https://github.com/cloudflare/cloudflare-go/blob/master/docs/experimental.md). ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `DeleteWorkersKV` has been renamed to `DeleteWorkersKVEntry`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `ListWorkersKVNamespaces` has been updated to match the experimental client method signatures (https://github.com/cloudflare/cloudflare-go/blob/master/docs/experimental.md). ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `ListWorkersKVsWithOptions` has been removed. Use `ListWorkersKVKeys` instead and pass in the options. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `ListWorkersKVs` has been renamed to `ListWorkersKVKeys`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `ReadWorkersKV` has been renamed to `GetWorkersKV`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `UpdateWorkersKVNamespace` has been updated to match the experimental client method signatures (https://github.com/cloudflare/cloudflare-go/blob/master/docs/experimental.md). ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `WriteWorkersKVBulk` has been renamed to `WriteWorkersKVEntries`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `WriteWorkersKV` has been renamed to `WriteWorkersKVEntry` (see the sketch after this list). ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115))
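To illustrate the renamed Workers KV entry points above, a minimal sketch, not taken from the upstream repository: it assumes a v0.55.0+ client, the namespace ID is a placeholder, and the parameter struct names shown are assumptions based on the library's params-struct convention.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken(os.Getenv("CLOUDFLARE_API_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	account := cloudflare.AccountIdentifier(os.Getenv("CLOUDFLARE_ACCOUNT_ID"))
	namespaceID := "REPLACE_WITH_KV_NAMESPACE_ID" // placeholder

	// WriteWorkersKV -> WriteWorkersKVEntry (params struct name assumed).
	_, err = api.WriteWorkersKVEntry(ctx, account, cloudflare.WriteWorkersKVEntryParams{
		NamespaceID: namespaceID,
		Key:         "greeting",
		Value:       []byte("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// ReadWorkersKV -> GetWorkersKV (params struct name assumed).
	value, err := api.GetWorkersKV(ctx, account, cloudflare.GetWorkersKVParams{
		NamespaceID: namespaceID,
		Key:         "greeting",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("greeting = %s\n", value)
}
```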
([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `ListWorkersKVs` has been renamed to `ListWorkersKVKeys`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `ReadWorkersKV` has been renamed to `GetWorkersKV`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `UpdateWorkersKVNamespace` has been updated to match the experimental client method signatures (https://github.com/cloudflare/cloudflare-go/blob/master/docs/experimental.md). ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `WriteWorkersKVBulk` has been renamed to `WriteWorkersKVEntries`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) +- workers_kv: `WriteWorkersKV` has been renamed to `WriteWorkersKVEntry`. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) + +ENHANCEMENTS: + +- device_posture_rule: add input fields for Crowdstrike ([#1126](https://github.com/cloudflare/cloudflare-go/issues/1126)) +- queue: add support for the queue API ([#1131](https://github.com/cloudflare/cloudflare-go/issues/1131)) +- r2: Add support for listing R2 buckets ([#1063](https://github.com/cloudflare/cloudflare-go/issues/1063)) +- workers_domain: add support for workers domain API ([#1130](https://github.com/cloudflare/cloudflare-go/issues/1130)) +- workers_kv: `ListWorkersKVNamespaces` automatically paginates all results unless `PerPage` is defined. ([#1115](https://github.com/cloudflare/cloudflare-go/issues/1115)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.23.4 to 2.23.5 ([#1127](https://github.com/cloudflare/cloudflare-go/issues/1127)) + +## 0.54.0 (November 9th, 2022) + +ENHANCEMENTS: + +- access: add support for service token rotation ([#1120](https://github.com/cloudflare/cloudflare-go/issues/1120)) +- deps: fix import grouping, code formatting and enable goimports linter ([#1121](https://github.com/cloudflare/cloudflare-go/issues/1121)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.3.4 to 1.3.5 ([#1123](https://github.com/cloudflare/cloudflare-go/issues/1123)) +- deps: bumps github.com/urfave/cli/v2 from 2.20.3 to 2.23.0 ([#1122](https://github.com/cloudflare/cloudflare-go/issues/1122)) +- deps: bumps github.com/urfave/cli/v2 from 2.23.0 to 2.23.2 ([#1124](https://github.com/cloudflare/cloudflare-go/issues/1124)) +- deps: bumps github.com/urfave/cli/v2 from 2.23.2 to 2.23.4 ([#1125](https://github.com/cloudflare/cloudflare-go/issues/1125)) + +## 0.53.0 (October 26th, 2022) + +BREAKING CHANGES: + +- account_member: `CreateAccountMember` has been updated to accept a `CreateAccountMemberParams` struct instead of multiple parameters ([#1095](https://github.com/cloudflare/cloudflare-go/issues/1095)) +- teams_list: updated methods to match the experimental client format ([#1114](https://github.com/cloudflare/cloudflare-go/issues/1114)) + +ENHANCEMENTS: + +- account_member: add support for domain scoped roles ([#1095](https://github.com/cloudflare/cloudflare-go/issues/1095)) +- cloudflare: expose `Messages` from the `Response` object ([#1106](https://github.com/cloudflare/cloudflare-go/issues/1106)) +- dlp: Adds support for DLP resources ([#1111](https://github.com/cloudflare/cloudflare-go/issues/1111)) +- teams_list: `List` operations now automatically paginate ([#1114](https://github.com/cloudflare/cloudflare-go/issues/1114)) +- total_tls: adds support for TotalTLS ([#1105](https://github.com/cloudflare/cloudflare-go/issues/1105)) +- waiting_room: add
support for waiting room rules ([#1102](https://github.com/cloudflare/cloudflare-go/issues/1102)) + +DEPENDENCIES: + +- deps: `ioutil` package is being deprecated in favor of `io` ([#1116](https://github.com/cloudflare/cloudflare-go/issues/1116)) +- deps: bumps github.com/stretchr/testify from 1.8.0 to 1.8.1 ([#1119](https://github.com/cloudflare/cloudflare-go/issues/1119)) +- deps: bumps github.com/urfave/cli/v2 from 2.19.2 to 2.20.2 ([#1108](https://github.com/cloudflare/cloudflare-go/issues/1108)) +- deps: bumps github.com/urfave/cli/v2 from 2.20.2 to 2.20.3 ([#1118](https://github.com/cloudflare/cloudflare-go/issues/1118)) +- deps: bumps goreleaser/goreleaser-action from 3.1.0 to 3.2.0 ([#1112](https://github.com/cloudflare/cloudflare-go/issues/1112)) +- deps: remove `github.com/pkg/errors` in favor of `errors` ([#1117](https://github.com/cloudflare/cloudflare-go/issues/1117)) + +## 0.52.0 (October 12th, 2022) + +ENHANCEMENTS: + +- access: add UI read-only field to organizations ([#1104](https://github.com/cloudflare/cloudflare-go/issues/1104)) +- devices_policy: Add support for additional device settings policies ([#1090](https://github.com/cloudflare/cloudflare-go/issues/1090)) +- rulesets: add support for `sensitivity_level` to override all rule sensitivity ([#1093](https://github.com/cloudflare/cloudflare-go/issues/1093)) + +DEPENDENCIES: + +- deps: bumps dependabot/fetch-metadata from 1.3.3 to 1.3.4 ([#1097](https://github.com/cloudflare/cloudflare-go/issues/1097)) +- deps: bumps github.com/urfave/cli/v2 from 2.16.3 to 2.17.1 ([#1094](https://github.com/cloudflare/cloudflare-go/issues/1094)) +- deps: bumps github.com/urfave/cli/v2 from 2.17.1 to 2.19.2 ([#1103](https://github.com/cloudflare/cloudflare-go/issues/1103)) + +## 0.51.0 (September 28th, 2022) + +BREAKING CHANGES: + +- load_balancing: update method signatures to match experimental conventions ([#1084](https://github.com/cloudflare/cloudflare-go/issues/1084)) + +ENHANCEMENTS: + +- device_posture_rule: add input fields for Linux OS ([#1087](https://github.com/cloudflare/cloudflare-go/issues/1087)) +- load_balancing: support adaptive_routing and location_strategy ([#1091](https://github.com/cloudflare/cloudflare-go/issues/1091)) + +BUG FIXES: + +- user-agent-blocking-rules: add missing managed_challenge validation and remove the deprecated whitelist one ([#1089](https://github.com/cloudflare/cloudflare-go/issues/1089)) + +## 0.50.0 (September 14th, 2022) + +ENHANCEMENTS: + +- auditlogs: add support for hide_user_logs filter parameter ([#1075](https://github.com/cloudflare/cloudflare-go/issues/1075)) + +BUG FIXES: + +- cloudflare: exit closer to the source on context timeouts to improve error messaging and better defend from potential edge cases ([#1080](https://github.com/cloudflare/cloudflare-go/issues/1080)) +- origin certificate: Fix API auth type used ([#1082](https://github.com/cloudflare/cloudflare-go/issues/1082)) + +DEPENDENCIES: + +- deps: bumps github.com/urfave/cli/v2 from 2.11.2 to 2.14.0 ([#1077](https://github.com/cloudflare/cloudflare-go/issues/1077)) +- deps: bumps github.com/urfave/cli/v2 from 2.14.0 to 2.14.1 ([#1081](https://github.com/cloudflare/cloudflare-go/issues/1081)) +- deps: bumps github.com/urfave/cli/v2 from 2.14.1 to 2.15.0 ([#1085](https://github.com/cloudflare/cloudflare-go/issues/1085)) +- deps: bumps github.com/urfave/cli/v2 from 2.15.0 to 2.16.3 ([#1086](https://github.com/cloudflare/cloudflare-go/issues/1086)) + +## 0.49.0 (August 31st, 2022) + +ENHANCEMENTS: + +- access_service_token:
add support for refreshing an existing token in place ([#1074](https://github.com/cloudflare/cloudflare-go/issues/1074)) +- api: added context and headers to Raw method ([#1068](https://github.com/cloudflare/cloudflare-go/issues/1068)) +- api_shield: add GET/PUT for API Shield Configuration ([#1059](https://github.com/cloudflare/cloudflare-go/issues/1059)) +- pages_project: Add `kv_namespaces`, `durable_object_namespaces`, `r2_buckets`, and `d1_databases` bindings to deployment config ([#1065](https://github.com/cloudflare/cloudflare-go/issues/1065)) +- pages_project: Add `preview_deployment_setting`, `preview_branch_includes`, and `preview_branch_excludes` to source config ([#1065](https://github.com/cloudflare/cloudflare-go/issues/1065)) +- pages_project: Add `production_branch` field ([#1065](https://github.com/cloudflare/cloudflare-go/issues/1065)) +- teams_account: add support for `os_distro_name` and `os_distro_revision` ([#1073](https://github.com/cloudflare/cloudflare-go/issues/1073)) +- url_normalization_settings: Add APIs to get and update URL normalization settings ([#1071](https://github.com/cloudflare/cloudflare-go/issues/1071)) +- workers: Support for multipart encoding for DownloadWorker on a module-format Worker script ([#1040](https://github.com/cloudflare/cloudflare-go/issues/1040)) + +BUG FIXES: + +- cloudflare: fix nil dereference error in makeRequestWithAuthTypeAndHeaders ([#1072](https://github.com/cloudflare/cloudflare-go/issues/1072)) +- email_routing_rules: Fix response for email routing catch-all rule. ([#1070](https://github.com/cloudflare/cloudflare-go/issues/1070)) +- email_routing_settings: change enable endpoint from `enabled` to `enable` ([#1060](https://github.com/cloudflare/cloudflare-go/issues/1060)) +- stream: Update pctComplete to string from int ([#1066](https://github.com/cloudflare/cloudflare-go/issues/1066)) + +DEPENDENCIES: + +- deps: bumps goreleaser/goreleaser-action from 3.0.0 to 3.1.0 ([#1067](https://github.com/cloudflare/cloudflare-go/issues/1067)) + +## 0.48.0 (August 22nd, 2022) + +ENHANCEMENTS: + +- errors: add some error type convenience functions for mocking and inspection ([#1047](https://github.com/cloudflare/cloudflare-go/issues/1047)) +- pages_project: Add compatibility date and compatibility_flags to pages deployment configs ([#1051](https://github.com/cloudflare/cloudflare-go/issues/1051)) +- teams_account: add support for `suppress_footer` ([#1053](https://github.com/cloudflare/cloudflare-go/issues/1053)) + +BUG FIXES: + +- r2: fix create bucket endpoint ([#1035](https://github.com/cloudflare/cloudflare-go/issues/1035)) +- tunnel_configuration: Remove unnecessary double-unmarshalling due to changes in the API ([#1046](https://github.com/cloudflare/cloudflare-go/issues/1046)) + +## 0.47.1 (August 18th, 2022) + +BUG FIXES: + +- zonelockdown: add `Priority` to `ZoneLockdownCreateParams` and `ZoneLockdownUpdateParams` ([#1052](https://github.com/cloudflare/cloudflare-go/issues/1052)) + +## 0.47.0 (August 17th, 2022) + +BREAKING CHANGES: + +- certificate_packs: deprecate "custom" configuration for ACM everywhere ([#1032](https://github.com/cloudflare/cloudflare-go/issues/1032)) + +ENHANCEMENTS: + +- cloudflare: make it clear when the rate limit retries have been exhausted ([#1043](https://github.com/cloudflare/cloudflare-go/issues/1043)) +- email_routing_destination: Adds support for the email routing destination API ([#1034](https://github.com/cloudflare/cloudflare-go/issues/1034)) +- email_routing_rules: Adds support for the email
routing rules API ([#1034](https://github.com/cloudflare/cloudflare-go/issues/1034)) +- email_routing_settings: Adds support for the email routing settings API ([#1034](https://github.com/cloudflare/cloudflare-go/issues/1034)) +- filter: fix double endpoint calls & move towards a common method signature ([#1016](https://github.com/cloudflare/cloudflare-go/issues/1016)) +- firewall_rule: fix double endpoint calls & move towards a common method signature ([#1016](https://github.com/cloudflare/cloudflare-go/issues/1016)) +- lockdown: automatically paginate `List` results unless `Page` and `PerPage` are provided ([#1017](https://github.com/cloudflare/cloudflare-go/issues/1017)) +- r2: Add in support for creating and deleting R2 buckets ([#1028](https://github.com/cloudflare/cloudflare-go/issues/1028)) +- rulesets: add support for `http_config_settings` phase and supporting actions ([#1036](https://github.com/cloudflare/cloudflare-go/issues/1036)) +- workers-account-settings: Add in support for Workers account settings API ([#1027](https://github.com/cloudflare/cloudflare-go/issues/1027)) +- workers-subdomain: Add in support for Workers Subdomain API ([#1031](https://github.com/cloudflare/cloudflare-go/issues/1031)) +- workers-tail: Add in support for Workers tail API ([#1026](https://github.com/cloudflare/cloudflare-go/issues/1026)) +- workers: Add support for attaching a worker to a domain ([#1014](https://github.com/cloudflare/cloudflare-go/issues/1014)) +- workers: Add support to upload module workers ([#1010](https://github.com/cloudflare/cloudflare-go/issues/1010)) + +BUG FIXES: + +- email_routing_destination: Update API reference URLs ([#1038](https://github.com/cloudflare/cloudflare-go/issues/1038)) +- email_routing_rules: Update API reference URLs ([#1038](https://github.com/cloudflare/cloudflare-go/issues/1038)) +- email_routing_settings: Update API reference URLs ([#1038](https://github.com/cloudflare/cloudflare-go/issues/1038)) +- tunnel_routes: Fix route not being removed when it contains a virtual network ([#1030](https://github.com/cloudflare/cloudflare-go/issues/1030)) +- workers_test: Fix incorrect test from PR #1014 ([#1048](https://github.com/cloudflare/cloudflare-go/issues/1048)) +- workers_test: Use application/json mime-type in headers ([#1049](https://github.com/cloudflare/cloudflare-go/issues/1049)) + +DEPENDENCIES: + +- deps: bumps golang.org/x/tools/gopls from 0.9.3 to 0.9.4 ([#1044](https://github.com/cloudflare/cloudflare-go/issues/1044)) +- deps: bumps github.com/golangci/golangci-lint from 1.47.3 to 1.48.0 ([#1020](https://github.com/cloudflare/cloudflare-go/issues/1020)) +- deps: bumps github.com/urfave/cli/v2 from 2.11.1 to 2.11.2 ([#1042](https://github.com/cloudflare/cloudflare-go/issues/1042)) +- deps: bumps golang.org/x/tools/gopls from 0.9.1 to 0.9.2 ([#1037](https://github.com/cloudflare/cloudflare-go/issues/1037)) +- deps: bumps golang.org/x/tools/gopls from 0.9.2 to 0.9.3 ([#1039](https://github.com/cloudflare/cloudflare-go/issues/1039)) + +## 0.46.0 (August 3rd, 2022) + +NOTES: + +- docs: add release notes ([#1001](https://github.com/cloudflare/cloudflare-go/issues/1001)) + +ENHANCEMENTS: + +- filter: automatically paginate `List` results unless `Page` and `PerPage` are provided ([#1004](https://github.com/cloudflare/cloudflare-go/issues/1004)) +- firewall_rule: automatically paginate `List` results unless `Page` and `PerPage` are provided ([#1004](https://github.com/cloudflare/cloudflare-go/issues/1004)) +- rulesets: add support for `http_custom_errors` phase
([#998](https://github.com/cloudflare/cloudflare-go/issues/998)) +- rulesets: add support for `serve_error` action ([#998](https://github.com/cloudflare/cloudflare-go/issues/998)) + +BUG FIXES: + +- access_application: fix inability to set bool values to false ([#1006](https://github.com/cloudflare/cloudflare-go/issues/1006)) +- rulesets: fix sni action parameter ([#1002](https://github.com/cloudflare/cloudflare-go/issues/1002)) + +DEPENDENCIES: + +- provider: bumps github.com/golangci/golangci-lint from 1.47.1 to 1.47.2 ([#1005](https://github.com/cloudflare/cloudflare-go/issues/1005)) +- provider: bumps github.com/golangci/golangci-lint from 1.47.2 to 1.47.3 ([#1008](https://github.com/cloudflare/cloudflare-go/issues/1008)) +- provider: bumps github.com/urfave/cli/v2 from 2.11.0 to 2.11.1 ([#1003](https://github.com/cloudflare/cloudflare-go/issues/1003)) + +## 0.45.0 (July 20th, 2022) diff --git a/vendor/github.com/cloudflare/cloudflare-go/CODE_OF_CONDUCT.md b/vendor/github.com/cloudflare/cloudflare-go/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..bfbc69d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/CODE_OF_CONDUCT.md @@ -0,0 +1,77 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ggalow@cloudflare.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq + diff --git a/vendor/github.com/cloudflare/cloudflare-go/LICENSE b/vendor/github.com/cloudflare/cloudflare-go/LICENSE new file mode 100644 index 0000000..39e4ddc --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2015-2022, Cloudflare. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/cloudflare/cloudflare-go/README.md b/vendor/github.com/cloudflare/cloudflare-go/README.md new file mode 100644 index 0000000..dc93558 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/README.md @@ -0,0 +1,85 @@ +# cloudflare-go + +[![Go Reference](https://pkg.go.dev/badge/github.com/cloudflare/cloudflare-go.svg)](https://pkg.go.dev/github.com/cloudflare/cloudflare-go) +![Test](https://github.com/cloudflare/cloudflare-go/workflows/Test/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/cloudflare/cloudflare-go?style=flat-square)](https://goreportcard.com/report/github.com/cloudflare/cloudflare-go) + +> **Note**: This library is under active development as we expand it to cover +> our (expanding!) API. Consider the public API of this package a little +> unstable as we work towards a v1.0. + +A Go library for interacting with +[Cloudflare's API v4](https://api.cloudflare.com/). This library allows you to: + +- Manage and automate changes to your DNS records within Cloudflare +- Manage and automate changes to your zones (domains) on Cloudflare, including + adding new zones to your account +- List and modify the status of WAF (Web Application Firewall) rules for your + zones +- Fetch Cloudflare's IP ranges for automating your firewall whitelisting + +A command-line client, [flarectl](cmd/flarectl), is also available as part of +this project. + +## Installation + +You need a working Go environment. We officially support only currently supported Go versions according to [Go project's release policy](https://go.dev/doc/devel/release#policy). + +``` +go get github.com/cloudflare/cloudflare-go +``` + +## Getting Started + +```go +package main + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/cloudflare/cloudflare-go" +) + +func main() { + // Construct a new API object using a global API key + api, err := cloudflare.New(os.Getenv("CLOUDFLARE_API_KEY"), os.Getenv("CLOUDFLARE_API_EMAIL")) + // alternatively, you can use a scoped API token + // api, err := cloudflare.NewWithAPIToken(os.Getenv("CLOUDFLARE_API_TOKEN")) + if err != nil { + log.Fatal(err) + } + + // Most API calls require a Context + ctx := context.Background() + + // Fetch user details on the account + u, err := api.UserDetails(ctx) + if err != nil { + log.Fatal(err) + } + // Print user details + fmt.Println(u) +} +``` + +Also refer to the +[API documentation](https://pkg.go.dev/github.com/cloudflare/cloudflare-go) for +how to use this package in-depth. + +## Experimental improvements + +This library is starting to ship with experimental improvements that are not yet +ready for production but will be introduced before the next major version. See +[experimental README](/docs/experimental.md) for full details. + +## Contributing + +Pull Requests are welcome, but please open an issue (or comment in an existing +issue) to discuss any non-trivial changes before submitting code. + +## License + +BSD licensed. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_application.go b/vendor/github.com/cloudflare/cloudflare-go/access_application.go new file mode 100644 index 0000000..def62c0 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_application.go @@ -0,0 +1,511 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AccessApplicationType represents the application type. 
+type AccessApplicationType string + +// These constants represent all valid application types. +const ( + SelfHosted AccessApplicationType = "self_hosted" + SSH AccessApplicationType = "ssh" + VNC AccessApplicationType = "vnc" + Biso AccessApplicationType = "biso" + AppLauncher AccessApplicationType = "app_launcher" + Warp AccessApplicationType = "warp" + Bookmark AccessApplicationType = "bookmark" + Saas AccessApplicationType = "saas" +) + +// AccessApplication represents an Access application. +type AccessApplication struct { + GatewayRules []AccessApplicationGatewayRule `json:"gateway_rules,omitempty"` + AllowedIdps []string `json:"allowed_idps,omitempty"` + CustomDenyMessage string `json:"custom_deny_message,omitempty"` + LogoURL string `json:"logo_url,omitempty"` + AUD string `json:"aud,omitempty"` + Domain string `json:"domain"` + SelfHostedDomains []string `json:"self_hosted_domains"` + Type AccessApplicationType `json:"type,omitempty"` + SessionDuration string `json:"session_duration,omitempty"` + SameSiteCookieAttribute string `json:"same_site_cookie_attribute,omitempty"` + CustomDenyURL string `json:"custom_deny_url,omitempty"` + CustomNonIdentityDenyURL string `json:"custom_non_identity_deny_url,omitempty"` + Name string `json:"name"` + ID string `json:"id,omitempty"` + PrivateAddress string `json:"private_address"` + CorsHeaders *AccessApplicationCorsHeaders `json:"cors_headers,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` + SaasApplication *SaasApplication `json:"saas_app,omitempty"` + AutoRedirectToIdentity *bool `json:"auto_redirect_to_identity,omitempty"` + SkipInterstitial *bool `json:"skip_interstitial,omitempty"` + AppLauncherVisible *bool `json:"app_launcher_visible,omitempty"` + EnableBindingCookie *bool `json:"enable_binding_cookie,omitempty"` + HttpOnlyCookieAttribute *bool `json:"http_only_cookie_attribute,omitempty"` + ServiceAuth401Redirect *bool `json:"service_auth_401_redirect,omitempty"` + PathCookieAttribute *bool `json:"path_cookie_attribute,omitempty"` + AllowAuthenticateViaWarp *bool `json:"allow_authenticate_via_warp,omitempty"` + OptionsPreflightBypass *bool `json:"options_preflight_bypass,omitempty"` + CustomPages []string `json:"custom_pages,omitempty"` + Tags []string `json:"tags,omitempty"` + SCIMConfig *AccessApplicationSCIMConfig `json:"scim_config,omitempty"` + Policies []AccessPolicy `json:"policies,omitempty"` + AccessAppLauncherCustomization +} + +type AccessApplicationGatewayRule struct { + ID string `json:"id,omitempty"` +} + +// AccessApplicationCorsHeaders represents the CORS HTTP headers for an Access +// Application. +type AccessApplicationCorsHeaders struct { + AllowedMethods []string `json:"allowed_methods,omitempty"` + AllowedOrigins []string `json:"allowed_origins,omitempty"` + AllowedHeaders []string `json:"allowed_headers,omitempty"` + AllowAllMethods bool `json:"allow_all_methods,omitempty"` + AllowAllHeaders bool `json:"allow_all_headers,omitempty"` + AllowAllOrigins bool `json:"allow_all_origins,omitempty"` + AllowCredentials bool `json:"allow_credentials,omitempty"` + MaxAge int `json:"max_age,omitempty"` +} + +// AccessApplicationSCIMConfig represents the configuration for provisioning to an Access Application via SCIM. 
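+// The Authentication field accepts any of the schemes enumerated by AccessApplicationScimAuthenticationScheme below (HTTP basic, OAuth bearer token, or OAuth 2.0).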
+type AccessApplicationSCIMConfig struct { + Enabled *bool `json:"enabled,omitempty"` + RemoteURI string `json:"remote_uri,omitempty"` + Authentication *AccessApplicationScimAuthenticationJson `json:"authentication,omitempty"` + IdPUID string `json:"idp_uid,omitempty"` + DeactivateOnDelete *bool `json:"deactivate_on_delete,omitempty"` + Mappings []*AccessApplicationScimMapping `json:"mappings,omitempty"` +} + +type AccessApplicationScimAuthenticationScheme string + +const ( + AccessApplicationScimAuthenticationSchemeHttpBasic AccessApplicationScimAuthenticationScheme = "httpbasic" + AccessApplicationScimAuthenticationSchemeOauthBearerToken AccessApplicationScimAuthenticationScheme = "oauthbearertoken" + AccessApplicationScimAuthenticationSchemeOauth2 AccessApplicationScimAuthenticationScheme = "oauth2" +) + +type AccessApplicationScimAuthenticationJson struct { + Value AccessApplicationScimAuthentication +} + +func (a *AccessApplicationScimAuthenticationJson) UnmarshalJSON(buf []byte) error { + var scheme baseScimAuthentication + if err := json.Unmarshal(buf, &scheme); err != nil { + return err + } + + switch scheme.Scheme { + case AccessApplicationScimAuthenticationSchemeHttpBasic: + a.Value = new(AccessApplicationScimAuthenticationHttpBasic) + case AccessApplicationScimAuthenticationSchemeOauthBearerToken: + a.Value = new(AccessApplicationScimAuthenticationOauthBearerToken) + case AccessApplicationScimAuthenticationSchemeOauth2: + a.Value = new(AccessApplicationScimAuthenticationOauth2) + default: + return errors.New("invalid authentication scheme") + } + + return json.Unmarshal(buf, a.Value) +} + +func (a *AccessApplicationScimAuthenticationJson) MarshalJSON() ([]byte, error) { + return json.Marshal(a.Value) +} + +type AccessApplicationScimAuthentication interface { + isScimAuthentication() +} + +type baseScimAuthentication struct { + Scheme AccessApplicationScimAuthenticationScheme `json:"scheme"` +} + +func (baseScimAuthentication) isScimAuthentication() {} + +type AccessApplicationScimAuthenticationHttpBasic struct { + baseScimAuthentication + User string `json:"user"` + Password string `json:"password"` +} + +type AccessApplicationScimAuthenticationOauthBearerToken struct { + baseScimAuthentication + Token string `json:"token"` +} + +type AccessApplicationScimAuthenticationOauth2 struct { + baseScimAuthentication + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AuthorizationURL string `json:"authorization_url"` + TokenURL string `json:"token_url"` + Scopes []string `json:"scopes,omitempty"` +} + +type AccessApplicationScimMapping struct { + Schema string `json:"schema"` + Enabled *bool `json:"enabled,omitempty"` + Filter string `json:"filter,omitempty"` + TransformJsonata string `json:"transform_jsonata,omitempty"` + Operations *AccessApplicationScimMappingOperations `json:"operations,omitempty"` +} + +type AccessApplicationScimMappingOperations struct { + Create *bool `json:"create,omitempty"` + Update *bool `json:"update,omitempty"` + Delete *bool `json:"delete,omitempty"` +} + +// AccessApplicationListResponse represents the response from the list +// access applications endpoint. +type AccessApplicationListResponse struct { + Result []AccessApplication `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessApplicationDetailResponse is the API response, containing a single +// access application. 
+type AccessApplicationDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessApplication `json:"result"` +} + +type SourceConfig struct { + Name string `json:"name,omitempty"` + NameByIDP map[string]string `json:"name_by_idp,omitempty"` +} + +type SAMLAttributeConfig struct { + Name string `json:"name,omitempty"` + NameFormat string `json:"name_format,omitempty"` + FriendlyName string `json:"friendly_name,omitempty"` + Required bool `json:"required,omitempty"` + Source SourceConfig `json:"source"` +} + +type OIDCClaimConfig struct { + Name string `json:"name,omitempty"` + Source SourceConfig `json:"source"` + Required *bool `json:"required,omitempty"` + Scope string `json:"scope,omitempty"` +} + +type RefreshTokenOptions struct { + Lifetime string `json:"lifetime,omitempty"` +} + +type AccessApplicationHybridAndImplicitOptions struct { + ReturnIDTokenFromAuthorizationEndpoint *bool `json:"return_id_token_from_authorization_endpoint,omitempty"` + ReturnAccessTokenFromAuthorizationEndpoint *bool `json:"return_access_token_from_authorization_endpoint,omitempty"` +} + +type SaasApplication struct { + // Items common to both SAML and OIDC + AppID string `json:"app_id,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + PublicKey string `json:"public_key,omitempty"` + AuthType string `json:"auth_type,omitempty"` + + // SAML saas app + ConsumerServiceUrl string `json:"consumer_service_url,omitempty"` + SPEntityID string `json:"sp_entity_id,omitempty"` + IDPEntityID string `json:"idp_entity_id,omitempty"` + NameIDFormat string `json:"name_id_format,omitempty"` + SSOEndpoint string `json:"sso_endpoint,omitempty"` + DefaultRelayState string `json:"default_relay_state,omitempty"` + CustomAttributes *[]SAMLAttributeConfig `json:"custom_attributes"` + NameIDTransformJsonata string `json:"name_id_transform_jsonata,omitempty"` + SamlAttributeTransformJsonata string `json:"saml_attribute_transform_jsonata"` + + // OIDC saas app + ClientID string `json:"client_id,omitempty"` + ClientSecret string `json:"client_secret,omitempty"` + RedirectURIs []string `json:"redirect_uris,omitempty"` + GrantTypes []string `json:"grant_types,omitempty"` + Scopes []string `json:"scopes,omitempty"` + AppLauncherURL string `json:"app_launcher_url,omitempty"` + GroupFilterRegex string `json:"group_filter_regex,omitempty"` + CustomClaims *[]OIDCClaimConfig `json:"custom_claims"` + AllowPKCEWithoutClientSecret *bool `json:"allow_pkce_without_client_secret,omitempty"` + RefreshTokenOptions *RefreshTokenOptions `json:"refresh_token_options,omitempty"` + HybridAndImplicitOptions *AccessApplicationHybridAndImplicitOptions `json:"hybrid_and_implicit_options,omitempty"` + AccessTokenLifetime string `json:"access_token_lifetime,omitempty"` +} + +type AccessAppLauncherCustomization struct { + LandingPageDesign AccessLandingPageDesign `json:"landing_page_design"` + LogoURL string `json:"app_launcher_logo_url"` + HeaderBackgroundColor string `json:"header_bg_color"` + BackgroundColor string `json:"bg_color"` + FooterLinks []AccessFooterLink `json:"footer_links"` + SkipAppLauncherLoginPage *bool `json:"skip_app_launcher_login_page,omitempty"` +} + +type AccessFooterLink struct { + Name string `json:"name"` + URL string `json:"url"` +} + +type AccessLandingPageDesign struct { + Title string `json:"title"` + Message string `json:"message"` + ImageURL string `json:"image_url"` + 
ButtonColor string `json:"button_color"` + ButtonTextColor string `json:"button_text_color"` +} + +type ListAccessApplicationsParams struct { + ResultInfo +} + +type CreateAccessApplicationParams struct { + AllowedIdps []string `json:"allowed_idps,omitempty"` + AppLauncherVisible *bool `json:"app_launcher_visible,omitempty"` + AUD string `json:"aud,omitempty"` + AutoRedirectToIdentity *bool `json:"auto_redirect_to_identity,omitempty"` + CorsHeaders *AccessApplicationCorsHeaders `json:"cors_headers,omitempty"` + CustomDenyMessage string `json:"custom_deny_message,omitempty"` + CustomDenyURL string `json:"custom_deny_url,omitempty"` + CustomNonIdentityDenyURL string `json:"custom_non_identity_deny_url,omitempty"` + Domain string `json:"domain"` + EnableBindingCookie *bool `json:"enable_binding_cookie,omitempty"` + GatewayRules []AccessApplicationGatewayRule `json:"gateway_rules,omitempty"` + HttpOnlyCookieAttribute *bool `json:"http_only_cookie_attribute,omitempty"` + LogoURL string `json:"logo_url,omitempty"` + Name string `json:"name"` + PathCookieAttribute *bool `json:"path_cookie_attribute,omitempty"` + PrivateAddress string `json:"private_address"` + SaasApplication *SaasApplication `json:"saas_app,omitempty"` + SameSiteCookieAttribute string `json:"same_site_cookie_attribute,omitempty"` + SelfHostedDomains []string `json:"self_hosted_domains"` + ServiceAuth401Redirect *bool `json:"service_auth_401_redirect,omitempty"` + SessionDuration string `json:"session_duration,omitempty"` + SkipInterstitial *bool `json:"skip_interstitial,omitempty"` + OptionsPreflightBypass *bool `json:"options_preflight_bypass,omitempty"` + Type AccessApplicationType `json:"type,omitempty"` + AllowAuthenticateViaWarp *bool `json:"allow_authenticate_via_warp,omitempty"` + CustomPages []string `json:"custom_pages,omitempty"` + Tags []string `json:"tags,omitempty"` + SCIMConfig *AccessApplicationSCIMConfig `json:"scim_config,omitempty"` + // List of policy ids to link to this application in ascending order of precedence. 
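+ // Can reference reusable policies and policies specific to this application.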
+ Policies []string `json:"policies,omitempty"` + AccessAppLauncherCustomization +} + +type UpdateAccessApplicationParams struct { + ID string `json:"id,omitempty"` + AllowedIdps []string `json:"allowed_idps,omitempty"` + AppLauncherVisible *bool `json:"app_launcher_visible,omitempty"` + AUD string `json:"aud,omitempty"` + AutoRedirectToIdentity *bool `json:"auto_redirect_to_identity,omitempty"` + CorsHeaders *AccessApplicationCorsHeaders `json:"cors_headers,omitempty"` + CustomDenyMessage string `json:"custom_deny_message,omitempty"` + CustomDenyURL string `json:"custom_deny_url,omitempty"` + CustomNonIdentityDenyURL string `json:"custom_non_identity_deny_url,omitempty"` + Domain string `json:"domain"` + EnableBindingCookie *bool `json:"enable_binding_cookie,omitempty"` + GatewayRules []AccessApplicationGatewayRule `json:"gateway_rules,omitempty"` + HttpOnlyCookieAttribute *bool `json:"http_only_cookie_attribute,omitempty"` + LogoURL string `json:"logo_url,omitempty"` + Name string `json:"name"` + PathCookieAttribute *bool `json:"path_cookie_attribute,omitempty"` + PrivateAddress string `json:"private_address"` + SaasApplication *SaasApplication `json:"saas_app,omitempty"` + SameSiteCookieAttribute string `json:"same_site_cookie_attribute,omitempty"` + SelfHostedDomains []string `json:"self_hosted_domains"` + ServiceAuth401Redirect *bool `json:"service_auth_401_redirect,omitempty"` + SessionDuration string `json:"session_duration,omitempty"` + SkipInterstitial *bool `json:"skip_interstitial,omitempty"` + Type AccessApplicationType `json:"type,omitempty"` + AllowAuthenticateViaWarp *bool `json:"allow_authenticate_via_warp,omitempty"` + OptionsPreflightBypass *bool `json:"options_preflight_bypass,omitempty"` + CustomPages []string `json:"custom_pages,omitempty"` + Tags []string `json:"tags,omitempty"` + SCIMConfig *AccessApplicationSCIMConfig `json:"scim_config,omitempty"` + // List of policy ids to link to this application in ascending order of precedence. + // Can reference reusable policies and policies specific to this application. + // If this field is not provided, the existing policies will not be modified. + Policies *[]string `json:"policies,omitempty"` + AccessAppLauncherCustomization +} + +// ListAccessApplications returns all applications within an account or zone. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-applications-list-access-applications +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-applications-list-access-applications +func (api *API) ListAccessApplications(ctx context.Context, rc *ResourceContainer, params ListAccessApplicationsParams) ([]AccessApplication, *ResultInfo, error) { + baseURL := fmt.Sprintf("/%s/%s/access/apps", rc.Level, rc.Identifier) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var applications []AccessApplication + var r AccessApplicationListResponse + + for { + uri := buildURI(baseURL, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessApplication{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessApplication{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + applications = append(applications, r.Result...) 
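+ // Advance to the next page of results; the loop ends once ResultInfo reports the final page, or after a single request when the caller supplied an explicit Page or PerPage (auto-pagination disabled).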
+ params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return applications, &r.ResultInfo, nil +} + +// GetAccessApplication returns a single application based on the application +// ID for either account or zone. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-applications-get-an-access-application +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-applications-get-an-access-application +func (api *API) GetAccessApplication(ctx context.Context, rc *ResourceContainer, applicationID string) (AccessApplication, error) { + uri := fmt.Sprintf( + "/%s/%s/access/apps/%s", + rc.Level, + rc.Identifier, + applicationID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessApplication{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessApplicationDetailResponse AccessApplicationDetailResponse + err = json.Unmarshal(res, &accessApplicationDetailResponse) + if err != nil { + return AccessApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessApplicationDetailResponse.Result, nil +} + +// CreateAccessApplication creates a new access application. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-applications-add-an-application +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-applications-add-a-bookmark-application +func (api *API) CreateAccessApplication(ctx context.Context, rc *ResourceContainer, params CreateAccessApplicationParams) (AccessApplication, error) { + uri := fmt.Sprintf("/%s/%s/access/apps", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessApplication{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessApplicationDetailResponse AccessApplicationDetailResponse + err = json.Unmarshal(res, &accessApplicationDetailResponse) + if err != nil { + return AccessApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessApplicationDetailResponse.Result, nil +} + +// UpdateAccessApplication updates an existing access application. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-applications-update-a-bookmark-application +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-applications-update-a-bookmark-application +func (api *API) UpdateAccessApplication(ctx context.Context, rc *ResourceContainer, params UpdateAccessApplicationParams) (AccessApplication, error) { + if params.ID == "" { + return AccessApplication{}, fmt.Errorf("access application ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/access/apps/%s", + rc.Level, + rc.Identifier, + params.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessApplication{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessApplicationDetailResponse AccessApplicationDetailResponse + err = json.Unmarshal(res, &accessApplicationDetailResponse) + if err != nil { + return AccessApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessApplicationDetailResponse.Result, nil +} + +// DeleteAccessApplication deletes an access application. 
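+// A usage sketch, not from the upstream docs (identifiers are placeholders):
+//
+//	err := api.DeleteAccessApplication(ctx, cloudflare.AccountIdentifier("account-id"), "application-id")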
+// +// Account API reference: https://developers.cloudflare.com/api/operations/access-applications-delete-an-access-application +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-applications-delete-an-access-application +func (api *API) DeleteAccessApplication(ctx context.Context, rc *ResourceContainer, applicationID string) error { + uri := fmt.Sprintf( + "/%s/%s/access/apps/%s", + rc.Level, + rc.Identifier, + applicationID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} + +// RevokeAccessApplicationTokens revokes tokens associated with an +// access application. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-applications-revoke-service-tokens +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-applications-revoke-service-tokens +func (api *API) RevokeAccessApplicationTokens(ctx context.Context, rc *ResourceContainer, applicationID string) error { + uri := fmt.Sprintf( + "/%s/%s/access/apps/%s/revoke-tokens", + rc.Level, + rc.Identifier, + applicationID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_audit_log.go b/vendor/github.com/cloudflare/cloudflare-go/access_audit_log.go new file mode 100644 index 0000000..62658e3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_audit_log.go @@ -0,0 +1,86 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/goccy/go-json" +) + +// AccessAuditLogRecord is the structure of a single Access Audit Log entry. +type AccessAuditLogRecord struct { + UserEmail string `json:"user_email"` + IPAddress string `json:"ip_address"` + AppUID string `json:"app_uid"` + AppDomain string `json:"app_domain"` + Action string `json:"action"` + Connection string `json:"connection"` + Allowed bool `json:"allowed"` + CreatedAt *time.Time `json:"created_at"` + RayID string `json:"ray_id"` +} + +// AccessAuditLogListResponse represents the response from the list +// access applications endpoint. +type AccessAuditLogListResponse struct { + Result []AccessAuditLogRecord `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessAuditLogFilterOptions provides the structure of available audit log +// filters. +type AccessAuditLogFilterOptions struct { + Direction string + Since *time.Time + Until *time.Time + Limit int +} + +// AccessAuditLogs retrieves all audit logs for the Access service. 
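+// A minimal usage sketch, not from the upstream docs (the account ID and limit are placeholder values):
+//
+//	logs, err := api.AccessAuditLogs(ctx, "account-id", cloudflare.AccessAuditLogFilterOptions{Limit: 25})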
+// +// API reference: https://api.cloudflare.com/#access-requests-access-requests-audit +func (api *API) AccessAuditLogs(ctx context.Context, accountID string, opts AccessAuditLogFilterOptions) ([]AccessAuditLogRecord, error) { + uri := fmt.Sprintf("/accounts/%s/access/logs/access-requests?%s", accountID, opts.Encode()) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessAuditLogRecord{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessAuditLogListResponse AccessAuditLogListResponse + err = json.Unmarshal(res, &accessAuditLogListResponse) + if err != nil { + return []AccessAuditLogRecord{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessAuditLogListResponse.Result, nil +} + +// Encode is a custom method for encoding the filter options into a usable HTTP +// query parameter string. +func (a AccessAuditLogFilterOptions) Encode() string { + v := url.Values{} + + if a.Direction != "" { + v.Set("direction", a.Direction) + } + + if a.Limit > 0 { + v.Set("limit", strconv.Itoa(a.Limit)) + } + + if a.Since != nil { + v.Set("since", a.Since.Format(time.RFC3339)) + } + + if a.Until != nil { + v.Set("until", a.Until.Format(time.RFC3339)) + } + + return v.Encode() +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_bookmark.go b/vendor/github.com/cloudflare/cloudflare-go/access_bookmark.go new file mode 100644 index 0000000..769c861 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_bookmark.go @@ -0,0 +1,206 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AccessBookmark represents an Access bookmark application. +type AccessBookmark struct { + ID string `json:"id,omitempty"` + Domain string `json:"domain"` + Name string `json:"name"` + LogoURL string `json:"logo_url,omitempty"` + AppLauncherVisible *bool `json:"app_launcher_visible,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// AccessBookmarkListResponse represents the response from the list +// access bookmarks endpoint. +type AccessBookmarkListResponse struct { + Result []AccessBookmark `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessBookmarkDetailResponse is the API response, containing a single +// access bookmark. +type AccessBookmarkDetailResponse struct { + Response + Result AccessBookmark `json:"result"` +} + +// AccessBookmarks returns all bookmarks within an account. +// +// API reference: https://api.cloudflare.com/#access-bookmarks-list-access-bookmarks +func (api *API) AccessBookmarks(ctx context.Context, accountID string, pageOpts PaginationOptions) ([]AccessBookmark, ResultInfo, error) { + return api.accessBookmarks(ctx, accountID, pageOpts, AccountRouteRoot) +} + +// ZoneLevelAccessBookmarks returns all bookmarks within a zone. 
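+// A usage sketch, not from the upstream docs (the zone ID is a placeholder):
+//
+//	bookmarks, _, err := api.ZoneLevelAccessBookmarks(ctx, "zone-id", cloudflare.PaginationOptions{})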
+// +// API reference: https://api.cloudflare.com/#zone-level-access-bookmarks-list-access-bookmarks +func (api *API) ZoneLevelAccessBookmarks(ctx context.Context, zoneID string, pageOpts PaginationOptions) ([]AccessBookmark, ResultInfo, error) { + return api.accessBookmarks(ctx, zoneID, pageOpts, ZoneRouteRoot) +} + +func (api *API) accessBookmarks(ctx context.Context, id string, pageOpts PaginationOptions, routeRoot RouteRoot) ([]AccessBookmark, ResultInfo, error) { + uri := buildURI(fmt.Sprintf("/%s/%s/access/bookmarks", routeRoot, id), pageOpts) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessBookmark{}, ResultInfo{}, err + } + + var accessBookmarkListResponse AccessBookmarkListResponse + err = json.Unmarshal(res, &accessBookmarkListResponse) + if err != nil { + return []AccessBookmark{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessBookmarkListResponse.Result, accessBookmarkListResponse.ResultInfo, nil +} + +// AccessBookmark returns a single bookmark based on the +// bookmark ID. +// +// API reference: https://api.cloudflare.com/#access-bookmarks-access-bookmarks-details +func (api *API) AccessBookmark(ctx context.Context, accountID, bookmarkID string) (AccessBookmark, error) { + return api.accessBookmark(ctx, accountID, bookmarkID, AccountRouteRoot) +} + +// ZoneLevelAccessBookmark returns a single zone level bookmark based on the +// bookmark ID. +// +// API reference: https://api.cloudflare.com/#zone-level-access-bookmarks-access-bookmarks-details +func (api *API) ZoneLevelAccessBookmark(ctx context.Context, zoneID, bookmarkID string) (AccessBookmark, error) { + return api.accessBookmark(ctx, zoneID, bookmarkID, ZoneRouteRoot) +} + +func (api *API) accessBookmark(ctx context.Context, id, bookmarkID string, routeRoot RouteRoot) (AccessBookmark, error) { + uri := fmt.Sprintf( + "/%s/%s/access/bookmarks/%s", + routeRoot, + id, + bookmarkID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessBookmark{}, err + } + + var accessBookmarkDetailResponse AccessBookmarkDetailResponse + err = json.Unmarshal(res, &accessBookmarkDetailResponse) + if err != nil { + return AccessBookmark{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessBookmarkDetailResponse.Result, nil +} + +// CreateAccessBookmark creates a new access bookmark. +// +// API reference: https://api.cloudflare.com/#access-bookmarks-create-access-bookmark +func (api *API) CreateAccessBookmark(ctx context.Context, accountID string, accessBookmark AccessBookmark) (AccessBookmark, error) { + return api.createAccessBookmark(ctx, accountID, accessBookmark, AccountRouteRoot) +} + +// CreateZoneLevelAccessBookmark creates a new zone level access bookmark. 
+// +// API reference: https://api.cloudflare.com/#zone-level-access-bookmarks-create-access-bookmark +func (api *API) CreateZoneLevelAccessBookmark(ctx context.Context, zoneID string, accessBookmark AccessBookmark) (AccessBookmark, error) { + return api.createAccessBookmark(ctx, zoneID, accessBookmark, ZoneRouteRoot) +} + +func (api *API) createAccessBookmark(ctx context.Context, id string, accessBookmark AccessBookmark, routeRoot RouteRoot) (AccessBookmark, error) { + uri := fmt.Sprintf("/%s/%s/access/bookmarks", routeRoot, id) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, accessBookmark) + if err != nil { + return AccessBookmark{}, err + } + + var accessBookmarkDetailResponse AccessBookmarkDetailResponse + err = json.Unmarshal(res, &accessBookmarkDetailResponse) + if err != nil { + return AccessBookmark{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessBookmarkDetailResponse.Result, nil +} + +// UpdateAccessBookmark updates an existing access bookmark. +// +// API reference: https://api.cloudflare.com/#access-bookmarks-update-access-bookmark +func (api *API) UpdateAccessBookmark(ctx context.Context, accountID string, accessBookmark AccessBookmark) (AccessBookmark, error) { + return api.updateAccessBookmark(ctx, accountID, accessBookmark, AccountRouteRoot) +} + +// UpdateZoneLevelAccessBookmark updates an existing zone level access bookmark. +// +// API reference: https://api.cloudflare.com/#zone-level-access-bookmarks-update-access-bookmark +func (api *API) UpdateZoneLevelAccessBookmark(ctx context.Context, zoneID string, accessBookmark AccessBookmark) (AccessBookmark, error) { + return api.updateAccessBookmark(ctx, zoneID, accessBookmark, ZoneRouteRoot) +} + +func (api *API) updateAccessBookmark(ctx context.Context, id string, accessBookmark AccessBookmark, routeRoot RouteRoot) (AccessBookmark, error) { + if accessBookmark.ID == "" { + return AccessBookmark{}, fmt.Errorf("access bookmark ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/access/bookmarks/%s", + routeRoot, + id, + accessBookmark.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, accessBookmark) + if err != nil { + return AccessBookmark{}, err + } + + var accessBookmarkDetailResponse AccessBookmarkDetailResponse + err = json.Unmarshal(res, &accessBookmarkDetailResponse) + if err != nil { + return AccessBookmark{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessBookmarkDetailResponse.Result, nil +} + +// DeleteAccessBookmark deletes an access bookmark. +// +// API reference: https://api.cloudflare.com/#access-bookmarks-delete-access-bookmark +func (api *API) DeleteAccessBookmark(ctx context.Context, accountID, bookmarkID string) error { + return api.deleteAccessBookmark(ctx, accountID, bookmarkID, AccountRouteRoot) +} + +// DeleteZoneLevelAccessBookmark deletes a zone level access bookmark. 
+// +// API reference: https://api.cloudflare.com/#zone-level-access-bookmarks-delete-access-bookmark +func (api *API) DeleteZoneLevelAccessBookmark(ctx context.Context, zoneID, bookmarkID string) error { + return api.deleteAccessBookmark(ctx, zoneID, bookmarkID, ZoneRouteRoot) +} + +func (api *API) deleteAccessBookmark(ctx context.Context, id, bookmarkID string, routeRoot RouteRoot) error { + uri := fmt.Sprintf( + "/%s/%s/access/bookmarks/%s", + routeRoot, + id, + bookmarkID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_ca_certificate.go b/vendor/github.com/cloudflare/cloudflare-go/access_ca_certificate.go new file mode 100644 index 0000000..8f16787 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_ca_certificate.go @@ -0,0 +1,153 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// AccessCACertificate is the structure of the CA certificate used for +// short-lived certificates. +type AccessCACertificate struct { + ID string `json:"id"` + Aud string `json:"aud"` + PublicKey string `json:"public_key"` +} + +// AccessCACertificateListResponse represents the response of all CA +// certificates within Access. +type AccessCACertificateListResponse struct { + Response + Result []AccessCACertificate `json:"result"` + ResultInfo +} + +// AccessCACertificateResponse represents the response of a single CA +// certificate. +type AccessCACertificateResponse struct { + Response + Result AccessCACertificate `json:"result"` +} + +type ListAccessCACertificatesParams struct { + ResultInfo +} + +type CreateAccessCACertificateParams struct { + ApplicationID string +} + +// ListAccessCACertificates returns all AccessCACertificate within Access. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-short-lived-certificate-c-as-list-short-lived-certificate-c-as +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-short-lived-certificate-c-as-list-short-lived-certificate-c-as +func (api *API) ListAccessCACertificates(ctx context.Context, rc *ResourceContainer, params ListAccessCACertificatesParams) ([]AccessCACertificate, *ResultInfo, error) { + baseURL := fmt.Sprintf("/%s/%s/access/apps/ca", rc.Level, rc.Identifier) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var accessCACertificates []AccessCACertificate + var r AccessCACertificateListResponse + + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessCACertificate{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessCACertificate{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + accessCACertificates = append(accessCACertificates, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return accessCACertificates, &r.ResultInfo, nil +} + +// GetAccessCACertificate returns a single CA certificate associated within +// Access. 
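+// A usage sketch, not from the upstream docs (identifiers are placeholders):
+//
+//	cert, err := api.GetAccessCACertificate(ctx, cloudflare.AccountIdentifier("account-id"), "application-id")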
+// +// Account API reference: https://developers.cloudflare.com/api/operations/access-short-lived-certificate-c-as-get-a-short-lived-certificate-ca +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-short-lived-certificate-c-as-get-a-short-lived-certificate-ca +func (api *API) GetAccessCACertificate(ctx context.Context, rc *ResourceContainer, applicationID string) (AccessCACertificate, error) { + uri := fmt.Sprintf("/%s/%s/access/apps/%s/ca", rc.Level, rc.Identifier, applicationID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessCACertificate{}, err + } + + var accessCAResponse AccessCACertificateResponse + err = json.Unmarshal(res, &accessCAResponse) + if err != nil { + return AccessCACertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessCAResponse.Result, nil +} + +// CreateAccessCACertificate creates a new CA certificate for an AccessApplication. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-short-lived-certificate-c-as-create-a-short-lived-certificate-ca +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-short-lived-certificate-c-as-create-a-short-lived-certificate-ca +func (api *API) CreateAccessCACertificate(ctx context.Context, rc *ResourceContainer, params CreateAccessCACertificateParams) (AccessCACertificate, error) { + uri := fmt.Sprintf( + "/%s/%s/access/apps/%s/ca", + rc.Level, + rc.Identifier, + params.ApplicationID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return AccessCACertificate{}, err + } + + var accessCACertificate AccessCACertificateResponse + err = json.Unmarshal(res, &accessCACertificate) + if err != nil { + return AccessCACertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessCACertificate.Result, nil +} + +// DeleteAccessCACertificate deletes an Access CA certificate on a defined +// AccessApplication. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-short-lived-certificate-c-as-delete-a-short-lived-certificate-ca +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-short-lived-certificate-c-as-delete-a-short-lived-certificate-ca +func (api *API) DeleteAccessCACertificate(ctx context.Context, rc *ResourceContainer, applicationID string) error { + uri := fmt.Sprintf( + "/%s/%s/access/apps/%s/ca", + rc.Level, + rc.Identifier, + applicationID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_custom_page.go b/vendor/github.com/cloudflare/cloudflare-go/access_custom_page.go new file mode 100644 index 0000000..cb7e2cc --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_custom_page.go @@ -0,0 +1,127 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +var ErrMissingUID = errors.New("required UID missing") + +type AccessCustomPageType string + +const ( + Forbidden AccessCustomPageType = "forbidden" + IdentityDenied AccessCustomPageType = "identity_denied" +) + +type AccessCustomPage struct { + // The HTML content of the custom page. 
+ CustomHTML string `json:"custom_html,omitempty"` + Name string `json:"name,omitempty"` + AppCount int `json:"app_count,omitempty"` + Type AccessCustomPageType `json:"type,omitempty"` + UID string `json:"uid,omitempty"` +} + +type AccessCustomPageListResponse struct { + Response + Result []AccessCustomPage `json:"result"` + ResultInfo `json:"result_info"` +} + +type AccessCustomPageResponse struct { + Response + Result AccessCustomPage `json:"result"` +} + +type ListAccessCustomPagesParams struct{} + +type CreateAccessCustomPageParams struct { + CustomHTML string `json:"custom_html,omitempty"` + Name string `json:"name,omitempty"` + Type AccessCustomPageType `json:"type,omitempty"` +} + +type UpdateAccessCustomPageParams struct { + CustomHTML string `json:"custom_html,omitempty"` + Name string `json:"name,omitempty"` + Type AccessCustomPageType `json:"type,omitempty"` + UID string `json:"uid,omitempty"` +} + +func (api *API) ListAccessCustomPages(ctx context.Context, rc *ResourceContainer, params ListAccessCustomPagesParams) ([]AccessCustomPage, error) { + uri := buildURI(fmt.Sprintf("/%s/%s/access/custom_pages", rc.Level, rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessCustomPage{}, err + } + + var customPagesResponse AccessCustomPageListResponse + err = json.Unmarshal(res, &customPagesResponse) + if err != nil { + return []AccessCustomPage{}, err + } + return customPagesResponse.Result, nil +} + +func (api *API) GetAccessCustomPage(ctx context.Context, rc *ResourceContainer, id string) (AccessCustomPage, error) { + uri := fmt.Sprintf("/%s/%s/access/custom_pages/%s", rc.Level, rc.Identifier, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessCustomPage{}, err + } + + var customPageResponse AccessCustomPageResponse + err = json.Unmarshal(res, &customPageResponse) + if err != nil { + return AccessCustomPage{}, err + } + return customPageResponse.Result, nil +} + +func (api *API) CreateAccessCustomPage(ctx context.Context, rc *ResourceContainer, params CreateAccessCustomPageParams) (AccessCustomPage, error) { + uri := fmt.Sprintf("/%s/%s/access/custom_pages", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessCustomPage{}, err + } + + var customPageResponse AccessCustomPageResponse + err = json.Unmarshal(res, &customPageResponse) + if err != nil { + return AccessCustomPage{}, err + } + return customPageResponse.Result, nil +} + +func (api *API) DeleteAccessCustomPage(ctx context.Context, rc *ResourceContainer, id string) error { + uri := fmt.Sprintf("/%s/%s/access/custom_pages/%s", rc.Level, rc.Identifier, id) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + return nil +} + +func (api *API) UpdateAccessCustomPage(ctx context.Context, rc *ResourceContainer, params UpdateAccessCustomPageParams) (AccessCustomPage, error) { + if params.UID == "" { + return AccessCustomPage{}, ErrMissingUID + } + + uri := fmt.Sprintf("/%s/%s/access/custom_pages/%s", rc.Level, rc.Identifier, params.UID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessCustomPage{}, err + } + + var customPageResponse AccessCustomPageResponse + err = json.Unmarshal(res, &customPageResponse) + if err != nil { + return AccessCustomPage{}, err + } + return customPageResponse.Result, nil +} diff --git 
a/vendor/github.com/cloudflare/cloudflare-go/access_group.go b/vendor/github.com/cloudflare/cloudflare-go/access_group.go new file mode 100644 index 0000000..a521db8 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_group.go @@ -0,0 +1,405 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AccessGroup defines a group for allowing or disallowing access to +// one or more Access applications. +type AccessGroup struct { + ID string `json:"id,omitempty"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Name string `json:"name"` + + // The include group works like an OR logical operator. The user must + // satisfy one of the rules. + Include []interface{} `json:"include"` + + // The exclude group works like a NOT logical operator. The user must + // not satisfy all the rules in exclude. + Exclude []interface{} `json:"exclude"` + + // The require group works like a AND logical operator. The user must + // satisfy all the rules in require. + Require []interface{} `json:"require"` +} + +// AccessGroupEmail is used for managing access based on the email. +// For example, restrict access to users with the email addresses +// `test@example.com` or `someone@example.com`. +type AccessGroupEmail struct { + Email struct { + Email string `json:"email"` + } `json:"email"` +} + +// AccessGroupEmailList is used for managing access based on the email +// list. For example, restrict access to users with the email addresses +// in the email list with the ID `1234567890abcdef1234567890abcdef`. +type AccessGroupEmailList struct { + EmailList struct { + ID string `json:"id"` + } `json:"email_list"` +} + +// AccessGroupEmailDomain is used for managing access based on an email +// domain such as `example.com` instead of individual addresses. +type AccessGroupEmailDomain struct { + EmailDomain struct { + Domain string `json:"domain"` + } `json:"email_domain"` +} + +// AccessGroupIP is used for managing access based in the IP. It +// accepts individual IPs or CIDRs. +type AccessGroupIP struct { + IP struct { + IP string `json:"ip"` + } `json:"ip"` +} + +// AccessGroupGeo is used for managing access based on the country code. +type AccessGroupGeo struct { + Geo struct { + CountryCode string `json:"country_code"` + } `json:"geo"` +} + +// AccessGroupEveryone is used for managing access to everyone. +type AccessGroupEveryone struct { + Everyone struct{} `json:"everyone"` +} + +// AccessGroupServiceToken is used for managing access based on a specific +// service token. +type AccessGroupServiceToken struct { + ServiceToken struct { + ID string `json:"token_id"` + } `json:"service_token"` +} + +// AccessGroupAnyValidServiceToken is used for managing access for all valid +// service tokens (not restricted). +type AccessGroupAnyValidServiceToken struct { + AnyValidServiceToken struct{} `json:"any_valid_service_token"` +} + +// AccessGroupAccessGroup is used for managing access based on an +// access group. +type AccessGroupAccessGroup struct { + Group struct { + ID string `json:"id"` + } `json:"group"` +} + +// AccessGroupCertificate is used for managing access to based on a valid +// mTLS certificate being presented. +type AccessGroupCertificate struct { + Certificate struct{} `json:"certificate"` +} + +// AccessGroupCertificateCommonName is used for managing access based on a +// common name within a certificate. 
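// Illustrative sketch (not from the vendored file): because the matcher fields of
// these rule types are anonymous structs, callers usually populate them in two
// steps rather than with a nested literal. The email address and CIDR below are
// placeholders.
//
//    var email cloudflare.AccessGroupEmail
//    email.Email.Email = "user@example.com"
//
//    var ip cloudflare.AccessGroupIP
//    ip.IP.IP = "192.0.2.0/24"
//
//    include := []interface{}{email, ip} // e.g. the Include slice of an AccessGroup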
+type AccessGroupCertificateCommonName struct { + CommonName struct { + CommonName string `json:"common_name"` + } `json:"common_name"` +} + +// AccessGroupExternalEvaluation is used for passing user identity to an external url. +type AccessGroupExternalEvaluation struct { + ExternalEvaluation struct { + EvaluateURL string `json:"evaluate_url"` + KeysURL string `json:"keys_url"` + } `json:"external_evaluation"` +} + +// AccessGroupGSuite is used to configure access based on GSuite group. +type AccessGroupGSuite struct { + Gsuite struct { + Email string `json:"email"` + IdentityProviderID string `json:"identity_provider_id"` + } `json:"gsuite"` +} + +// AccessGroupGitHub is used to configure access based on a GitHub organisation. +type AccessGroupGitHub struct { + GitHubOrganization struct { + Name string `json:"name"` + Team string `json:"team,omitempty"` + IdentityProviderID string `json:"identity_provider_id"` + } `json:"github-organization"` +} + +// AccessGroupAzure is used to configure access based on a Azure group. +type AccessGroupAzure struct { + AzureAD struct { + ID string `json:"id"` + IdentityProviderID string `json:"identity_provider_id"` + } `json:"azureAD"` +} + +// AccessGroupOkta is used to configure access based on a Okta group. +type AccessGroupOkta struct { + Okta struct { + Name string `json:"name"` + IdentityProviderID string `json:"identity_provider_id"` + } `json:"okta"` +} + +// AccessGroupSAML is used to allow SAML users with a specific attribute +// configuration. +type AccessGroupSAML struct { + Saml struct { + AttributeName string `json:"attribute_name"` + AttributeValue string `json:"attribute_value"` + IdentityProviderID string `json:"identity_provider_id"` + } `json:"saml"` +} + +// AccessGroupAzureAuthContext is used to configure access based on Azure auth contexts. +type AccessGroupAzureAuthContext struct { + AuthContext struct { + ID string `json:"id"` + IdentityProviderID string `json:"identity_provider_id"` + ACID string `json:"ac_id"` + } `json:"auth_context"` +} + +// AccessGroupAuthMethod is used for managing access by the "amr" +// (Authentication Methods References) identifier. For example, an +// application may want to require that users authenticate using a hardware +// key by setting the "auth_method" to "swk". A list of values are listed +// here: https://tools.ietf.org/html/rfc8176#section-2. Custom values are +// supported as well. +type AccessGroupAuthMethod struct { + AuthMethod struct { + AuthMethod string `json:"auth_method"` + } `json:"auth_method"` +} + +// AccessGroupLoginMethod restricts the application to specific IdP instances. +type AccessGroupLoginMethod struct { + LoginMethod struct { + ID string `json:"id"` + } `json:"login_method"` +} + +// AccessGroupDevicePosture restricts the application to specific devices. +type AccessGroupDevicePosture struct { + DevicePosture struct { + ID string `json:"integration_uid"` + } `json:"device_posture"` +} + +// AccessGroupListResponse represents the response from the list +// access group endpoint. +type AccessGroupListResponse struct { + Result []AccessGroup `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessGroupIPList restricts the application to specific teams_list of ips. +type AccessGroupIPList struct { + IPList struct { + ID string `json:"id"` + } `json:"ip_list"` +} + +// AccessGroupDetailResponse is the API response, containing a single +// access group. 
+type AccessGroupDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessGroup `json:"result"` +} + +type ListAccessGroupsParams struct { + ResultInfo +} + +type CreateAccessGroupParams struct { + Name string `json:"name"` + + // The include group works like an OR logical operator. The user must + // satisfy one of the rules. + Include []interface{} `json:"include"` + + // The exclude group works like a NOT logical operator. The user must + // not satisfy all the rules in exclude. + Exclude []interface{} `json:"exclude"` + + // The require group works like a AND logical operator. The user must + // satisfy all the rules in require. + Require []interface{} `json:"require"` +} + +type UpdateAccessGroupParams struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + + // The include group works like an OR logical operator. The user must + // satisfy one of the rules. + Include []interface{} `json:"include"` + + // The exclude group works like a NOT logical operator. The user must + // not satisfy all the rules in exclude. + Exclude []interface{} `json:"exclude"` + + // The require group works like a AND logical operator. The user must + // satisfy all the rules in require. + Require []interface{} `json:"require"` +} + +// ListAccessGroups returns all access groups for an access application. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-groups-list-access-groups +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-groups-list-access-groups +func (api *API) ListAccessGroups(ctx context.Context, rc *ResourceContainer, params ListAccessGroupsParams) ([]AccessGroup, *ResultInfo, error) { + baseURL := fmt.Sprintf("/%s/%s/access/groups", rc.Level, rc.Identifier) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var accessGroups []AccessGroup + var r AccessGroupListResponse + + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessGroup{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessGroup{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + accessGroups = append(accessGroups, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return accessGroups, &r.ResultInfo, nil +} + +// GetAccessGroup returns a single group based on the group ID. 
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-groups-get-an-access-group +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-groups-get-an-access-group +func (api *API) GetAccessGroup(ctx context.Context, rc *ResourceContainer, groupID string) (AccessGroup, error) { + uri := fmt.Sprintf( + "/%s/%s/access/groups/%s", + rc.Level, + rc.Identifier, + groupID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessGroup{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessGroupDetailResponse AccessGroupDetailResponse + err = json.Unmarshal(res, &accessGroupDetailResponse) + if err != nil { + return AccessGroup{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessGroupDetailResponse.Result, nil +} + +// CreateAccessGroup creates a new access group. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-groups-create-an-access-group +// Zone API Reference:https://developers.cloudflare.com/api/operations/zone-level-access-groups-create-an-access-group +func (api *API) CreateAccessGroup(ctx context.Context, rc *ResourceContainer, params CreateAccessGroupParams) (AccessGroup, error) { + uri := fmt.Sprintf( + "/%s/%s/access/groups", + rc.Level, + rc.Identifier, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessGroup{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessGroupDetailResponse AccessGroupDetailResponse + err = json.Unmarshal(res, &accessGroupDetailResponse) + if err != nil { + return AccessGroup{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessGroupDetailResponse.Result, nil +} + +// UpdateAccessGroup updates an existing access group. 
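// Illustrative sketch (not from the vendored file): creating an account-level
// group that allows a single email address, reusing the two-step rule
// construction shown earlier. api and ctx are assumed; the account ID, group
// name, and email are placeholders.
//
//    var email cloudflare.AccessGroupEmail
//    email.Email.Email = "user@example.com"
//
//    group, err := api.CreateAccessGroup(ctx, cloudflare.AccountIdentifier("0123456789abcdef"), cloudflare.CreateAccessGroupParams{
//        Name:    "Example group",
//        Include: []interface{}{email},
//        Exclude: []interface{}{},
//        Require: []interface{}{},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println("created group", group.ID)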
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-groups-update-an-access-group +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-groups-update-an-access-group +func (api *API) UpdateAccessGroup(ctx context.Context, rc *ResourceContainer, params UpdateAccessGroupParams) (AccessGroup, error) { + if params.ID == "" { + return AccessGroup{}, fmt.Errorf("access group ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/access/groups/%s", + rc.Level, + rc.Identifier, + params.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessGroup{}, err + } + + var accessGroupDetailResponse AccessGroupDetailResponse + err = json.Unmarshal(res, &accessGroupDetailResponse) + if err != nil { + return AccessGroup{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessGroupDetailResponse.Result, nil +} + +// DeleteAccessGroup deletes an access group +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-groups-delete-an-access-group +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-groups-delete-an-access-group +func (api *API) DeleteAccessGroup(ctx context.Context, rc *ResourceContainer, groupID string) error { + uri := fmt.Sprintf( + "/%s/%s/access/groups/%s", + rc.Level, + rc.Identifier, + groupID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_identity_provider.go b/vendor/github.com/cloudflare/cloudflare-go/access_identity_provider.go new file mode 100644 index 0000000..a1ffc8c --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_identity_provider.go @@ -0,0 +1,306 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// AccessIdentityProvider is the structure of the provider object. +type AccessIdentityProvider struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Config AccessIdentityProviderConfiguration `json:"config"` + ScimConfig AccessIdentityProviderScimConfiguration `json:"scim_config"` +} + +// AccessIdentityProviderConfiguration is the combined structure of *all* +// identity provider configuration fields. This is done to simplify the use of +// Access products and their relationship to each other. 
+// +// API reference: https://developers.cloudflare.com/access/configuring-identity-providers/ +type AccessIdentityProviderConfiguration struct { + APIToken string `json:"api_token,omitempty"` + AppsDomain string `json:"apps_domain,omitempty"` + Attributes []string `json:"attributes,omitempty"` + AuthURL string `json:"auth_url,omitempty"` + CentrifyAccount string `json:"centrify_account,omitempty"` + CentrifyAppID string `json:"centrify_app_id,omitempty"` + CertsURL string `json:"certs_url,omitempty"` + ClientID string `json:"client_id,omitempty"` + ClientSecret string `json:"client_secret,omitempty"` + Claims []string `json:"claims,omitempty"` + Scopes []string `json:"scopes,omitempty"` + DirectoryID string `json:"directory_id,omitempty"` + EmailAttributeName string `json:"email_attribute_name,omitempty"` + EmailClaimName string `json:"email_claim_name,omitempty"` + IdpPublicCert string `json:"idp_public_cert,omitempty"` + IssuerURL string `json:"issuer_url,omitempty"` + OktaAccount string `json:"okta_account,omitempty"` + OktaAuthorizationServerID string `json:"authorization_server_id,omitempty"` + OneloginAccount string `json:"onelogin_account,omitempty"` + PingEnvID string `json:"ping_env_id,omitempty"` + RedirectURL string `json:"redirect_url,omitempty"` + SignRequest bool `json:"sign_request,omitempty"` + SsoTargetURL string `json:"sso_target_url,omitempty"` + SupportGroups bool `json:"support_groups,omitempty"` + TokenURL string `json:"token_url,omitempty"` + PKCEEnabled *bool `json:"pkce_enabled,omitempty"` + ConditionalAccessEnabled bool `json:"conditional_access_enabled,omitempty"` +} + +type AccessIdentityProviderScimConfiguration struct { + Enabled bool `json:"enabled,omitempty"` + Secret string `json:"secret,omitempty"` + UserDeprovision bool `json:"user_deprovision,omitempty"` + SeatDeprovision bool `json:"seat_deprovision,omitempty"` + GroupMemberDeprovision bool `json:"group_member_deprovision,omitempty"` +} + +// AccessIdentityProvidersListResponse is the API response for multiple +// Access Identity Providers. +type AccessIdentityProvidersListResponse struct { + Response + Result []AccessIdentityProvider `json:"result"` + ResultInfo `json:"result_info"` +} + +// AccessIdentityProviderResponse is the API response for a single +// Access Identity Provider. +type AccessIdentityProviderResponse struct { + Response + Result AccessIdentityProvider `json:"result"` +} + +type ListAccessIdentityProvidersParams struct { + ResultInfo +} + +type CreateAccessIdentityProviderParams struct { + Name string `json:"name"` + Type string `json:"type"` + Config AccessIdentityProviderConfiguration `json:"config"` + ScimConfig AccessIdentityProviderScimConfiguration `json:"scim_config"` +} + +type UpdateAccessIdentityProviderParams struct { + ID string `json:"-"` + Name string `json:"name"` + Type string `json:"type"` + Config AccessIdentityProviderConfiguration `json:"config"` + ScimConfig AccessIdentityProviderScimConfiguration `json:"scim_config"` +} + +// AccessAuthContext represents an Access Azure Identity Provider Auth Context. +type AccessAuthContext struct { + ID string `json:"id"` + UID string `json:"uid"` + ACID string `json:"ac_id"` + DisplayName string `json:"display_name"` + Description string `json:"description"` +} + +// AccessAuthContextsListResponse represents the response from the list +// Access Auth Contexts endpoint. 
+type AccessAuthContextsListResponse struct { + Result []AccessAuthContext `json:"result"` + Response +} + +// ListAccessIdentityProviders returns all Access Identity Providers for an +// account or zone. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-list-access-identity-providers +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-list-access-identity-providers +func (api *API) ListAccessIdentityProviders(ctx context.Context, rc *ResourceContainer, params ListAccessIdentityProvidersParams) ([]AccessIdentityProvider, *ResultInfo, error) { + baseURL := fmt.Sprintf("/%s/%s/access/identity_providers", rc.Level, rc.Identifier) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var accessProviders []AccessIdentityProvider + var r AccessIdentityProvidersListResponse + + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessIdentityProvider{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessIdentityProvider{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + accessProviders = append(accessProviders, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return accessProviders, &r.ResultInfo, nil +} + +// GetAccessIdentityProvider returns a single Access Identity +// Provider for an account or zone. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-get-an-access-identity-provider +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-get-an-access-identity-provider +func (api *API) GetAccessIdentityProvider(ctx context.Context, rc *ResourceContainer, identityProviderID string) (AccessIdentityProvider, error) { + uri := fmt.Sprintf( + "/%s/%s/access/identity_providers/%s", + rc.Level, + rc.Identifier, + identityProviderID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessIdentityProviderResponse AccessIdentityProviderResponse + err = json.Unmarshal(res, &accessIdentityProviderResponse) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessIdentityProviderResponse.Result, nil +} + +// CreateAccessIdentityProvider creates a new Access Identity Provider. 
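// Illustrative sketch (not from the vendored file): creating an identity provider
// at the account level. The "onetimepin" type is used here because it typically
// needs no provider-specific configuration; api and ctx are assumed and the
// account ID and name are placeholders.
//
//    idp, err := api.CreateAccessIdentityProvider(ctx, cloudflare.AccountIdentifier("0123456789abcdef"), cloudflare.CreateAccessIdentityProviderParams{
//        Name:   "PIN login",
//        Type:   "onetimepin", // assumed provider type; others (okta, azureAD, ...) need Config fields
//        Config: cloudflare.AccessIdentityProviderConfiguration{},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println("created identity provider", idp.ID)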
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-add-an-access-identity-provider +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-add-an-access-identity-provider +func (api *API) CreateAccessIdentityProvider(ctx context.Context, rc *ResourceContainer, params CreateAccessIdentityProviderParams) (AccessIdentityProvider, error) { + uri := fmt.Sprintf("/%s/%s/access/identity_providers", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessIdentityProviderResponse AccessIdentityProviderResponse + err = json.Unmarshal(res, &accessIdentityProviderResponse) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessIdentityProviderResponse.Result, nil +} + +// UpdateAccessIdentityProvider updates an existing Access Identity +// Provider. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-update-an-access-identity-provider +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-update-an-access-identity-provider +func (api *API) UpdateAccessIdentityProvider(ctx context.Context, rc *ResourceContainer, params UpdateAccessIdentityProviderParams) (AccessIdentityProvider, error) { + uri := fmt.Sprintf( + "/%s/%s/access/identity_providers/%s", + rc.Level, + rc.Identifier, + params.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessIdentityProvider{}, err + } + + var accessIdentityProviderResponse AccessIdentityProviderResponse + err = json.Unmarshal(res, &accessIdentityProviderResponse) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessIdentityProviderResponse.Result, nil +} + +// DeleteAccessIdentityProvider deletes an Access Identity Provider. 
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-delete-an-access-identity-provider +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-delete-an-access-identity-provider +func (api *API) DeleteAccessIdentityProvider(ctx context.Context, rc *ResourceContainer, identityProviderUUID string) (AccessIdentityProvider, error) { + uri := fmt.Sprintf( + "/%s/%s/access/identity_providers/%s", + rc.Level, + rc.Identifier, + identityProviderUUID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessIdentityProviderResponse AccessIdentityProviderResponse + err = json.Unmarshal(res, &accessIdentityProviderResponse) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessIdentityProviderResponse.Result, nil +} + +// ListAccessIdentityProviderAuthContexts returns an identity provider's auth contexts +// AzureAD only +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-get-an-access-identity-provider +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-get-an-access-identity-provider +func (api *API) ListAccessIdentityProviderAuthContexts(ctx context.Context, rc *ResourceContainer, identityProviderID string) ([]AccessAuthContext, error) { + uri := fmt.Sprintf("/%s/%s/access/identity_providers/%s/auth_context", rc.Level, rc.Identifier, identityProviderID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessAuthContext{}, err + } + + var accessAuthContextListResponse AccessAuthContextsListResponse + err = json.Unmarshal(res, &accessAuthContextListResponse) + if err != nil { + return []AccessAuthContext{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessAuthContextListResponse.Result, nil +} + +// UpdateAccessIdentityProviderAuthContexts updates an existing Access Identity +// Provider. 
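// Illustrative sketch (not from the vendored file): refreshing and then listing
// the Azure AD auth contexts of an identity provider. api and ctx are assumed;
// the account and provider IDs are placeholders.
//
//    rc := cloudflare.AccountIdentifier("0123456789abcdef")
//    idpID := "f174e90a-fafe-4643-bbbc-4a0ed4fc8415" // placeholder AzureAD identity provider ID
//    if _, err := api.UpdateAccessIdentityProviderAuthContexts(ctx, rc, idpID); err != nil {
//        log.Fatal(err)
//    }
//    contexts, err := api.ListAccessIdentityProviderAuthContexts(ctx, rc, idpID)
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, ac := range contexts {
//        fmt.Println(ac.DisplayName, ac.ACID)
//    }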
+// AzureAD only +// Account API Reference: https://developers.cloudflare.com/api/operations/access-identity-providers-refresh-an-access-identity-provider-auth-contexts +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-identity-providers-update-an-access-identity-provider +func (api *API) UpdateAccessIdentityProviderAuthContexts(ctx context.Context, rc *ResourceContainer, identityProviderID string) (AccessIdentityProvider, error) { + uri := fmt.Sprintf( + "/%s/%s/access/identity_providers/%s/auth_context", + rc.Level, + rc.Identifier, + identityProviderID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, nil) + if err != nil { + return AccessIdentityProvider{}, err + } + + var accessIdentityProviderResponse AccessIdentityProviderResponse + err = json.Unmarshal(res, &accessIdentityProviderResponse) + if err != nil { + return AccessIdentityProvider{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessIdentityProviderResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_keys.go b/vendor/github.com/cloudflare/cloudflare-go/access_keys.go new file mode 100644 index 0000000..fbc714d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_keys.go @@ -0,0 +1,64 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type AccessKeysConfig struct { + KeyRotationIntervalDays int `json:"key_rotation_interval_days"` + LastKeyRotationAt time.Time `json:"last_key_rotation_at"` + DaysUntilNextRotation int `json:"days_until_next_rotation"` +} + +type AccessKeysConfigUpdateRequest struct { + KeyRotationIntervalDays int `json:"key_rotation_interval_days"` +} + +type accessKeysConfigResponse struct { + Response + Result AccessKeysConfig `json:"result"` +} + +// AccessKeysConfig returns the Access Keys Configuration for an account. +// +// API reference: https://api.cloudflare.com/#access-keys-configuration-get-access-keys-configuration +func (api *API) AccessKeysConfig(ctx context.Context, accountID string) (AccessKeysConfig, error) { + uri := fmt.Sprintf("/%s/%s/access/keys", AccountRouteRoot, accountID) + + return api.accessKeysRequest(ctx, http.MethodGet, uri, nil) +} + +// UpdateAccessKeysConfig updates the Access Keys Configuration for an account. 
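// Illustrative sketch (not from the vendored file): setting the key rotation
// interval and then forcing a rotation. Note these helpers take a raw account ID
// rather than a *ResourceContainer; api and ctx are assumed and the ID is a
// placeholder.
//
//    accountID := "0123456789abcdef"
//    cfg, err := api.UpdateAccessKeysConfig(ctx, accountID, cloudflare.AccessKeysConfigUpdateRequest{
//        KeyRotationIntervalDays: 30,
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println("next rotation in", cfg.DaysUntilNextRotation, "days")
//
//    if _, err := api.RotateAccessKeys(ctx, accountID); err != nil {
//        log.Fatal(err)
//    }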
+// +// API reference: https://api.cloudflare.com/#access-keys-configuration-update-access-keys-configuration +func (api *API) UpdateAccessKeysConfig(ctx context.Context, accountID string, request AccessKeysConfigUpdateRequest) (AccessKeysConfig, error) { + uri := fmt.Sprintf("/%s/%s/access/keys", AccountRouteRoot, accountID) + + return api.accessKeysRequest(ctx, http.MethodPut, uri, request) +} + +// RotateAccessKeys rotates the Access Keys for an account and returns the updated Access Keys Configuration +// +// API reference: https://api.cloudflare.com/#access-keys-configuration-rotate-access-keys +func (api *API) RotateAccessKeys(ctx context.Context, accountID string) (AccessKeysConfig, error) { + uri := fmt.Sprintf("/%s/%s/access/keys/rotate", AccountRouteRoot, accountID) + return api.accessKeysRequest(ctx, http.MethodPost, uri, nil) +} + +func (api *API) accessKeysRequest(ctx context.Context, method, uri string, params interface{}) (AccessKeysConfig, error) { + res, err := api.makeRequestContext(ctx, method, uri, params) + if err != nil { + return AccessKeysConfig{}, err + } + + var keysConfigResponse accessKeysConfigResponse + if err := json.Unmarshal(res, &keysConfigResponse); err != nil { + return AccessKeysConfig{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return keysConfigResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_mutual_tls_certificates.go b/vendor/github.com/cloudflare/cloudflare-go/access_mutual_tls_certificates.go new file mode 100644 index 0000000..9d4e413 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_mutual_tls_certificates.go @@ -0,0 +1,279 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AccessMutualTLSCertificate is the structure of a single Access Mutual TLS +// certificate. +type AccessMutualTLSCertificate struct { + ID string `json:"id,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + ExpiresOn time.Time `json:"expires_on,omitempty"` + Name string `json:"name,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + Certificate string `json:"certificate,omitempty"` + AssociatedHostnames []string `json:"associated_hostnames,omitempty"` +} + +// AccessMutualTLSCertificateListResponse is the API response for all Access +// Mutual TLS certificates. +type AccessMutualTLSCertificateListResponse struct { + Response + Result []AccessMutualTLSCertificate `json:"result"` + ResultInfo `json:"result_info"` +} + +// AccessMutualTLSCertificateDetailResponse is the API response for a single +// Access Mutual TLS certificate. 
+type AccessMutualTLSCertificateDetailResponse struct { + Response + Result AccessMutualTLSCertificate `json:"result"` +} + +type ListAccessMutualTLSCertificatesParams struct { + ResultInfo +} + +type CreateAccessMutualTLSCertificateParams struct { + ExpiresOn time.Time `json:"expires_on,omitempty"` + Name string `json:"name,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + Certificate string `json:"certificate,omitempty"` + AssociatedHostnames []string `json:"associated_hostnames,omitempty"` +} + +type UpdateAccessMutualTLSCertificateParams struct { + ID string `json:"-"` + ExpiresOn time.Time `json:"expires_on,omitempty"` + Name string `json:"name,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + Certificate string `json:"certificate,omitempty"` + AssociatedHostnames []string `json:"associated_hostnames,omitempty"` +} + +type AccessMutualTLSHostnameSettings struct { + ChinaNetwork *bool `json:"china_network,omitempty"` + ClientCertificateForwarding *bool `json:"client_certificate_forwarding,omitempty"` + Hostname string `json:"hostname,omitempty"` +} + +type GetAccessMutualTLSHostnameSettingsResponse struct { + Response + Result []AccessMutualTLSHostnameSettings `json:"result"` +} + +type UpdateAccessMutualTLSHostnameSettingsParams struct { + Settings []AccessMutualTLSHostnameSettings `json:"settings,omitempty"` +} + +// ListAccessMutualTLSCertificates returns all Access TLS certificates +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-list-mtls-certificates +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-list-mtls-certificates +func (api *API) ListAccessMutualTLSCertificates(ctx context.Context, rc *ResourceContainer, params ListAccessMutualTLSCertificatesParams) ([]AccessMutualTLSCertificate, *ResultInfo, error) { + baseURL := fmt.Sprintf( + "/%s/%s/access/certificates", + rc.Level, + rc.Identifier, + ) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var accessCertificates []AccessMutualTLSCertificate + var r AccessMutualTLSCertificateListResponse + + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessMutualTLSCertificate{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessMutualTLSCertificate{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + accessCertificates = append(accessCertificates, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return accessCertificates, &r.ResultInfo, nil +} + +// GetAccessMutualTLSCertificate returns a single Access Mutual TLS +// certificate. 
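// Illustrative sketch (not from the vendored file): uploading an mTLS CA
// certificate and associating it with a hostname. api and ctx are assumed; the
// account ID, hostname, and PEM file path are placeholders, and os/log/fmt are
// assumed to be imported.
//
//    caPEM, err := os.ReadFile("ca.pem") // placeholder path to a CA certificate in PEM form
//    if err != nil {
//        log.Fatal(err)
//    }
//    cert, err := api.CreateAccessMutualTLSCertificate(ctx, cloudflare.AccountIdentifier("0123456789abcdef"), cloudflare.CreateAccessMutualTLSCertificateParams{
//        Name:                "example CA",
//        Certificate:         string(caPEM),
//        AssociatedHostnames: []string{"app.example.com"},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println("uploaded certificate", cert.ID, "fingerprint", cert.Fingerprint)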
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-get-an-mtls-certificate +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-get-an-mtls-certificate +func (api *API) GetAccessMutualTLSCertificate(ctx context.Context, rc *ResourceContainer, certificateID string) (AccessMutualTLSCertificate, error) { + uri := fmt.Sprintf( + "/%s/%s/access/certificates/%s", + rc.Level, + rc.Identifier, + certificateID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessMutualTLSCertificate{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessMutualTLSCertificateDetailResponse AccessMutualTLSCertificateDetailResponse + err = json.Unmarshal(res, &accessMutualTLSCertificateDetailResponse) + if err != nil { + return AccessMutualTLSCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessMutualTLSCertificateDetailResponse.Result, nil +} + +// CreateAccessMutualTLSCertificate creates an Access TLS Mutual +// certificate. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-add-an-mtls-certificate +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-add-an-mtls-certificate +func (api *API) CreateAccessMutualTLSCertificate(ctx context.Context, rc *ResourceContainer, params CreateAccessMutualTLSCertificateParams) (AccessMutualTLSCertificate, error) { + uri := fmt.Sprintf( + "/%s/%s/access/certificates", + rc.Level, + rc.Identifier, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessMutualTLSCertificate{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessMutualTLSCertificateDetailResponse AccessMutualTLSCertificateDetailResponse + err = json.Unmarshal(res, &accessMutualTLSCertificateDetailResponse) + if err != nil { + return AccessMutualTLSCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessMutualTLSCertificateDetailResponse.Result, nil +} + +// UpdateAccessMutualTLSCertificate updates an account level Access TLS Mutual +// certificate. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-update-an-mtls-certificate +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-update-an-mtls-certificate +func (api *API) UpdateAccessMutualTLSCertificate(ctx context.Context, rc *ResourceContainer, params UpdateAccessMutualTLSCertificateParams) (AccessMutualTLSCertificate, error) { + uri := fmt.Sprintf( + "/%s/%s/access/certificates/%s", + rc.Level, + rc.Identifier, + params.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessMutualTLSCertificate{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessMutualTLSCertificateDetailResponse AccessMutualTLSCertificateDetailResponse + err = json.Unmarshal(res, &accessMutualTLSCertificateDetailResponse) + if err != nil { + return AccessMutualTLSCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessMutualTLSCertificateDetailResponse.Result, nil +} + +// DeleteAccessMutualTLSCertificate destroys an Access Mutual +// TLS certificate. 
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-delete-an-mtls-certificate +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-delete-an-mtls-certificate +func (api *API) DeleteAccessMutualTLSCertificate(ctx context.Context, rc *ResourceContainer, certificateID string) error { + uri := fmt.Sprintf( + "/%s/%s/access/certificates/%s", + rc.Level, + rc.Identifier, + certificateID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessMutualTLSCertificateDetailResponse AccessMutualTLSCertificateDetailResponse + err = json.Unmarshal(res, &accessMutualTLSCertificateDetailResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// GetAccessMutualTLSHostnameSettings returns all Access mTLS hostname settings. +// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-update-an-mtls-certificate-settings +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-list-mtls-certificates-hostname-settings +func (api *API) GetAccessMutualTLSHostnameSettings(ctx context.Context, rc *ResourceContainer) ([]AccessMutualTLSHostnameSettings, error) { + uri := fmt.Sprintf( + "/%s/%s/access/certificates/settings", + rc.Level, + rc.Identifier, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessMutualTLSHostnameSettings{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessMutualTLSHostnameSettingsResponse GetAccessMutualTLSHostnameSettingsResponse + err = json.Unmarshal(res, &accessMutualTLSHostnameSettingsResponse) + if err != nil { + return []AccessMutualTLSHostnameSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessMutualTLSHostnameSettingsResponse.Result, nil +} + +// UpdateAccessMutualTLSHostnameSettings updates Access mTLS certificate hostname settings. 
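// Illustrative sketch (not from the vendored file): enabling client certificate
// forwarding for one hostname. api and ctx are assumed; ZoneIdentifier comes from
// the package's resource helpers (outside this hunk), and the zone ID and
// hostname are placeholders.
//
//    forward := true
//    settings, err := api.UpdateAccessMutualTLSHostnameSettings(ctx, cloudflare.ZoneIdentifier("0123456789abcdef"), cloudflare.UpdateAccessMutualTLSHostnameSettingsParams{
//        Settings: []cloudflare.AccessMutualTLSHostnameSettings{{
//            Hostname:                    "app.example.com",
//            ClientCertificateForwarding: &forward,
//        }},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Printf("updated settings for %d hostnames\n", len(settings))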
+// +// Account API Reference: https://developers.cloudflare.com/api/operations/access-mtls-authentication-update-an-mtls-certificate-settings +// Zone API Reference: https://developers.cloudflare.com/api/operations/zone-level-access-mtls-authentication-update-an-mtls-certificate-settings +func (api *API) UpdateAccessMutualTLSHostnameSettings(ctx context.Context, rc *ResourceContainer, params UpdateAccessMutualTLSHostnameSettingsParams) ([]AccessMutualTLSHostnameSettings, error) { + uri := fmt.Sprintf( + "/%s/%s/access/certificates/settings", + rc.Level, + rc.Identifier, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return []AccessMutualTLSHostnameSettings{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessMutualTLSHostnameSettingsResponse GetAccessMutualTLSHostnameSettingsResponse + err = json.Unmarshal(res, &accessMutualTLSHostnameSettingsResponse) + if err != nil { + return []AccessMutualTLSHostnameSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessMutualTLSHostnameSettingsResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_organization.go b/vendor/github.com/cloudflare/cloudflare-go/access_organization.go new file mode 100644 index 0000000..f7eea16 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_organization.go @@ -0,0 +1,143 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AccessOrganization represents an Access organization. +type AccessOrganization struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Name string `json:"name"` + AuthDomain string `json:"auth_domain"` + LoginDesign AccessOrganizationLoginDesign `json:"login_design"` + IsUIReadOnly *bool `json:"is_ui_read_only,omitempty"` + UIReadOnlyToggleReason string `json:"ui_read_only_toggle_reason,omitempty"` + UserSeatExpirationInactiveTime string `json:"user_seat_expiration_inactive_time,omitempty"` + AutoRedirectToIdentity *bool `json:"auto_redirect_to_identity,omitempty"` + SessionDuration *string `json:"session_duration,omitempty"` + CustomPages AccessOrganizationCustomPages `json:"custom_pages,omitempty"` + WarpAuthSessionDuration *string `json:"warp_auth_session_duration,omitempty"` + AllowAuthenticateViaWarp *bool `json:"allow_authenticate_via_warp,omitempty"` +} + +// AccessOrganizationLoginDesign represents the login design options. +type AccessOrganizationLoginDesign struct { + BackgroundColor string `json:"background_color"` + LogoPath string `json:"logo_path"` + TextColor string `json:"text_color"` + HeaderText string `json:"header_text"` + FooterText string `json:"footer_text"` +} + +type AccessOrganizationCustomPages struct { + Forbidden AccessCustomPageType `json:"forbidden,omitempty"` + IdentityDenied AccessCustomPageType `json:"identity_denied,omitempty"` +} + +// AccessOrganizationListResponse represents the response from the list +// access organization endpoint. +type AccessOrganizationListResponse struct { + Result AccessOrganization `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessOrganizationDetailResponse is the API response, containing a +// single access organization. 
+type AccessOrganizationDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessOrganization `json:"result"` +} + +type GetAccessOrganizationParams struct{} + +type CreateAccessOrganizationParams struct { + Name string `json:"name"` + AuthDomain string `json:"auth_domain"` + LoginDesign AccessOrganizationLoginDesign `json:"login_design"` + IsUIReadOnly *bool `json:"is_ui_read_only,omitempty"` + UIReadOnlyToggleReason string `json:"ui_read_only_toggle_reason,omitempty"` + UserSeatExpirationInactiveTime string `json:"user_seat_expiration_inactive_time,omitempty"` + AutoRedirectToIdentity *bool `json:"auto_redirect_to_identity,omitempty"` + SessionDuration *string `json:"session_duration,omitempty"` + CustomPages AccessOrganizationCustomPages `json:"custom_pages,omitempty"` + WarpAuthSessionDuration *string `json:"warp_auth_session_duration,omitempty"` + AllowAuthenticateViaWarp *bool `json:"allow_authenticate_via_warp,omitempty"` +} + +type UpdateAccessOrganizationParams struct { + Name string `json:"name"` + AuthDomain string `json:"auth_domain"` + LoginDesign AccessOrganizationLoginDesign `json:"login_design"` + IsUIReadOnly *bool `json:"is_ui_read_only,omitempty"` + UIReadOnlyToggleReason string `json:"ui_read_only_toggle_reason,omitempty"` + UserSeatExpirationInactiveTime string `json:"user_seat_expiration_inactive_time,omitempty"` + AutoRedirectToIdentity *bool `json:"auto_redirect_to_identity,omitempty"` + SessionDuration *string `json:"session_duration,omitempty"` + CustomPages AccessOrganizationCustomPages `json:"custom_pages,omitempty"` + WarpAuthSessionDuration *string `json:"warp_auth_session_duration,omitempty"` + AllowAuthenticateViaWarp *bool `json:"allow_authenticate_via_warp,omitempty"` +} + +func (api *API) GetAccessOrganization(ctx context.Context, rc *ResourceContainer, params GetAccessOrganizationParams) (AccessOrganization, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/access/organizations", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessOrganization{}, ResultInfo{}, err + } + + var accessOrganizationListResponse AccessOrganizationListResponse + err = json.Unmarshal(res, &accessOrganizationListResponse) + if err != nil { + return AccessOrganization{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessOrganizationListResponse.Result, accessOrganizationListResponse.ResultInfo, nil +} + +func (api *API) CreateAccessOrganization(ctx context.Context, rc *ResourceContainer, params CreateAccessOrganizationParams) (AccessOrganization, error) { + uri := fmt.Sprintf("/%s/%s/access/organizations", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessOrganization{}, err + } + + var accessOrganizationDetailResponse AccessOrganizationDetailResponse + err = json.Unmarshal(res, &accessOrganizationDetailResponse) + if err != nil { + return AccessOrganization{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessOrganizationDetailResponse.Result, nil +} + +// UpdateAccessOrganization updates the Access organisation details. 
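// Illustrative sketch (not from the vendored file): reading the Access
// organization and then changing its login page colors. api and ctx are assumed;
// the account ID and colors are placeholders.
//
//    rc := cloudflare.AccountIdentifier("0123456789abcdef")
//    org, _, err := api.GetAccessOrganization(ctx, rc, cloudflare.GetAccessOrganizationParams{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    design := org.LoginDesign
//    design.BackgroundColor = "#000000"
//    design.TextColor = "#ffffff"
//    _, err = api.UpdateAccessOrganization(ctx, rc, cloudflare.UpdateAccessOrganizationParams{
//        Name:        org.Name,
//        AuthDomain:  org.AuthDomain,
//        LoginDesign: design,
//    })
//    if err != nil {
//        log.Fatal(err)
//    }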
+// +// Account API reference: https://api.cloudflare.com/#access-organizations-update-access-organization +// Zone API reference: https://api.cloudflare.com/#zone-level-access-organizations-update-access-organization +func (api *API) UpdateAccessOrganization(ctx context.Context, rc *ResourceContainer, params UpdateAccessOrganizationParams) (AccessOrganization, error) { + uri := fmt.Sprintf("/%s/%s/access/organizations", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessOrganization{}, err + } + + var accessOrganizationDetailResponse AccessOrganizationDetailResponse + err = json.Unmarshal(res, &accessOrganizationDetailResponse) + if err != nil { + return AccessOrganization{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessOrganizationDetailResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_policy.go b/vendor/github.com/cloudflare/cloudflare-go/access_policy.go new file mode 100644 index 0000000..a70ceed --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_policy.go @@ -0,0 +1,356 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type AccessApprovalGroup struct { + EmailListUuid string `json:"email_list_uuid,omitempty"` + EmailAddresses []string `json:"email_addresses,omitempty"` + ApprovalsNeeded int `json:"approvals_needed,omitempty"` +} + +// AccessPolicy defines a policy for allowing or disallowing access to +// one or more Access applications. +type AccessPolicy struct { + ID string `json:"id,omitempty"` + // Precedence is the order in which the policy is executed in an Access application. + // As a general rule, lower numbers take precedence over higher numbers. + // This field can only be zero when a reusable policy is requested outside the context + // of an Access application. + Precedence int `json:"precedence"` + Decision string `json:"decision"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Reusable *bool `json:"reusable,omitempty"` + Name string `json:"name"` + + IsolationRequired *bool `json:"isolation_required,omitempty"` + SessionDuration *string `json:"session_duration,omitempty"` + PurposeJustificationRequired *bool `json:"purpose_justification_required,omitempty"` + PurposeJustificationPrompt *string `json:"purpose_justification_prompt,omitempty"` + ApprovalRequired *bool `json:"approval_required,omitempty"` + ApprovalGroups []AccessApprovalGroup `json:"approval_groups"` + + // The include policy works like an OR logical operator. The user must + // satisfy one of the rules. + Include []interface{} `json:"include"` + + // The exclude policy works like a NOT logical operator. The user must + // not satisfy all the rules in exclude. + Exclude []interface{} `json:"exclude"` + + // The require policy works like a AND logical operator. The user must + // satisfy all the rules in require. + Require []interface{} `json:"require"` +} + +// AccessPolicyListResponse represents the response from the list +// access policies endpoint. +type AccessPolicyListResponse struct { + Result []AccessPolicy `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessPolicyDetailResponse is the API response, containing a single +// access policy. 
+type AccessPolicyDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessPolicy `json:"result"` +} + +type ListAccessPoliciesParams struct { + // ApplicationID is the application ID to list attached access policies for. + // If omitted, only reusable policies for the account are returned. + ApplicationID string `json:"-"` + ResultInfo +} + +type GetAccessPolicyParams struct { + PolicyID string `json:"-"` + // ApplicationID is the application ID for which to scope the policy for. + // Optional, but if included, the policy returned will include its execution precedence within the application. + ApplicationID string `json:"-"` +} + +type CreateAccessPolicyParams struct { + // ApplicationID is the application ID for which to create the policy for. + // Pass an empty value to create a reusable policy. + ApplicationID string `json:"-"` + + // Precedence is the order in which the policy is executed in an Access application. + // As a general rule, lower numbers take precedence over higher numbers. + // This field is ignored when creating a reusable policy. + // Read more here https://developers.cloudflare.com/cloudflare-one/policies/access/#order-of-execution + Precedence int `json:"precedence"` + Decision string `json:"decision"` + Name string `json:"name"` + + IsolationRequired *bool `json:"isolation_required,omitempty"` + SessionDuration *string `json:"session_duration,omitempty"` + PurposeJustificationRequired *bool `json:"purpose_justification_required,omitempty"` + PurposeJustificationPrompt *string `json:"purpose_justification_prompt,omitempty"` + ApprovalRequired *bool `json:"approval_required,omitempty"` + ApprovalGroups []AccessApprovalGroup `json:"approval_groups"` + + // The include policy works like an OR logical operator. The user must + // satisfy one of the rules. + Include []interface{} `json:"include"` + + // The exclude policy works like a NOT logical operator. The user must + // not satisfy all the rules in exclude. + Exclude []interface{} `json:"exclude"` + + // The require policy works like a AND logical operator. The user must + // satisfy all the rules in require. + Require []interface{} `json:"require"` +} + +type UpdateAccessPolicyParams struct { + // ApplicationID is the application ID that owns the existing policy. + // Pass an empty value if the existing policy is reusable. + ApplicationID string `json:"-"` + PolicyID string `json:"-"` + + // Precedence is the order in which the policy is executed in an Access application. + // As a general rule, lower numbers take precedence over higher numbers. + // This field is ignored when updating a reusable policy. + Precedence int `json:"precedence"` + Decision string `json:"decision"` + Name string `json:"name"` + + IsolationRequired *bool `json:"isolation_required,omitempty"` + SessionDuration *string `json:"session_duration,omitempty"` + PurposeJustificationRequired *bool `json:"purpose_justification_required,omitempty"` + PurposeJustificationPrompt *string `json:"purpose_justification_prompt,omitempty"` + ApprovalRequired *bool `json:"approval_required,omitempty"` + ApprovalGroups []AccessApprovalGroup `json:"approval_groups"` + + // The include policy works like an OR logical operator. The user must + // satisfy one of the rules. + Include []interface{} `json:"include"` + + // The exclude policy works like a NOT logical operator. The user must + // not satisfy all the rules in exclude. 
+ Exclude []interface{} `json:"exclude"` + + // The require policy works like a AND logical operator. The user must + // satisfy all the rules in require. + Require []interface{} `json:"require"` +} + +type DeleteAccessPolicyParams struct { + // ApplicationID is the application ID the policy belongs to. + // If the existing policy is reusable, this field must be omitted. Otherwise, it is required. + ApplicationID string `json:"-"` + PolicyID string `json:"-"` +} + +// ListAccessPolicies returns all access policies that match the parameters. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-policies-list-access-policies +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-policies-list-access-policies +func (api *API) ListAccessPolicies(ctx context.Context, rc *ResourceContainer, params ListAccessPoliciesParams) ([]AccessPolicy, *ResultInfo, error) { + var baseURL string + if params.ApplicationID != "" { + baseURL = fmt.Sprintf( + "/%s/%s/access/apps/%s/policies", + rc.Level, + rc.Identifier, + params.ApplicationID, + ) + } else { + baseURL = fmt.Sprintf( + "/%s/%s/access/policies", + rc.Level, + rc.Identifier, + ) + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var accessPolicies []AccessPolicy + var r AccessPolicyListResponse + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessPolicy{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessPolicy{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + accessPolicies = append(accessPolicies, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return accessPolicies, &r.ResultInfo, nil +} + +// GetAccessPolicy returns a single policy based on the policy ID. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-policies-get-an-access-policy +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-policies-get-an-access-policy +func (api *API) GetAccessPolicy(ctx context.Context, rc *ResourceContainer, params GetAccessPolicyParams) (AccessPolicy, error) { + var uri string + if params.ApplicationID != "" { + uri = fmt.Sprintf( + "/%s/%s/access/apps/%s/policies/%s", + rc.Level, + rc.Identifier, + params.ApplicationID, + params.PolicyID, + ) + } else { + uri = fmt.Sprintf( + "/%s/%s/access/policies/%s", + rc.Level, + rc.Identifier, + params.PolicyID, + ) + } + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessPolicy{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessPolicyDetailResponse AccessPolicyDetailResponse + err = json.Unmarshal(res, &accessPolicyDetailResponse) + if err != nil { + return AccessPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessPolicyDetailResponse.Result, nil +} + +// CreateAccessPolicy creates a new access policy. 
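+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx) and caller-supplied IDs (accountID, appID); the
+// include rule below is one example of the raw JSON rule shape the Access
+// API accepts, passed through as-is via the []interface{} field:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	policy, err := api.CreateAccessPolicy(ctx, rc, cloudflare.CreateAccessPolicyParams{
+//		ApplicationID: appID, // leave empty to create a reusable policy
+//		Name:          "Allow corporate emails",
+//		Decision:      "allow",
+//		Precedence:    1,
+//		Include: []interface{}{
+//			map[string]interface{}{"email_domain": map[string]string{"domain": "example.com"}},
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(policy.ID)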
+// +// Account API reference: https://developers.cloudflare.com/api/operations/access-policies-create-an-access-policy +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-policies-create-an-access-policy +func (api *API) CreateAccessPolicy(ctx context.Context, rc *ResourceContainer, params CreateAccessPolicyParams) (AccessPolicy, error) { + var uri string + if params.ApplicationID != "" { + uri = fmt.Sprintf( + "/%s/%s/access/apps/%s/policies", + rc.Level, + rc.Identifier, + params.ApplicationID, + ) + } else { + uri = fmt.Sprintf( + "/%s/%s/access/policies", + rc.Level, + rc.Identifier, + ) + } + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessPolicy{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessPolicyDetailResponse AccessPolicyDetailResponse + err = json.Unmarshal(res, &accessPolicyDetailResponse) + if err != nil { + return AccessPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessPolicyDetailResponse.Result, nil +} + +// UpdateAccessPolicy updates an existing access policy. +// +// Account API reference: https://developers.cloudflare.com/api/operations/access-policies-update-an-access-policy +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-policies-update-an-access-policy +func (api *API) UpdateAccessPolicy(ctx context.Context, rc *ResourceContainer, params UpdateAccessPolicyParams) (AccessPolicy, error) { + if params.PolicyID == "" { + return AccessPolicy{}, fmt.Errorf("access policy ID cannot be empty") + } + + var uri string + if params.ApplicationID != "" { + uri = fmt.Sprintf( + "/%s/%s/access/apps/%s/policies/%s", + rc.Level, + rc.Identifier, + params.ApplicationID, + params.PolicyID, + ) + } else { + uri = fmt.Sprintf( + "/%s/%s/access/policies/%s", + rc.Level, + rc.Identifier, + params.PolicyID, + ) + } + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessPolicy{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accessPolicyDetailResponse AccessPolicyDetailResponse + err = json.Unmarshal(res, &accessPolicyDetailResponse) + if err != nil { + return AccessPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessPolicyDetailResponse.Result, nil +} + +// DeleteAccessPolicy deletes an access policy. 
+// +// Account API reference: https://developers.cloudflare.com/api/operations/access-policies-delete-an-access-policy +// Zone API reference: https://developers.cloudflare.com/api/operations/zone-level-access-policies-delete-an-access-policy +func (api *API) DeleteAccessPolicy(ctx context.Context, rc *ResourceContainer, params DeleteAccessPolicyParams) error { + var uri string + if params.ApplicationID != "" { + uri = fmt.Sprintf( + "/%s/%s/access/apps/%s/policies/%s", + rc.Level, + rc.Identifier, + params.ApplicationID, + params.PolicyID, + ) + } else { + uri = fmt.Sprintf( + "/%s/%s/access/policies/%s", + rc.Level, + rc.Identifier, + params.PolicyID, + ) + } + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_seats.go b/vendor/github.com/cloudflare/cloudflare-go/access_seats.go new file mode 100644 index 0000000..ea44eeb --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_seats.go @@ -0,0 +1,111 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var errMissingAccessSeatUID = errors.New("missing required access seat UID") + +// AccessUpdateAccessUserSeatResult represents a Access User Seat. +type AccessUpdateAccessUserSeatResult struct { + AccessSeat *bool `json:"access_seat"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + GatewaySeat *bool `json:"gateway_seat"` + SeatUID string `json:"seat_uid,omitempty"` +} + +// UpdateAccessUserSeatParams represents the update payload for access seats. +type UpdateAccessUserSeatParams struct { + SeatUID string `json:"seat_uid,omitempty"` + AccessSeat *bool `json:"access_seat"` + GatewaySeat *bool `json:"gateway_seat"` +} + +// UpdateAccessUsersSeatsParams represents the update payload for multiple access seats. +type UpdateAccessUsersSeatsParams []struct { + SeatUID string `json:"seat_uid,omitempty"` + AccessSeat *bool `json:"access_seat"` + GatewaySeat *bool `json:"gateway_seat"` +} + +// AccessUserSeatResponse represents the response from the access user seat endpoints. +type UpdateAccessUserSeatResponse struct { + Response + Result []AccessUpdateAccessUserSeatResult `json:"result"` + ResultInfo `json:"result_info"` +} + +// UpdateAccessUserSeat updates a single Access User Seat. 
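+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx) and a seat UID taken from a previously listed Access
+// user; the endpoint is account-scoped, so a zone-level ResourceContainer is
+// rejected:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	seatOff := false
+//	_, err := api.UpdateAccessUserSeat(ctx, rc, cloudflare.UpdateAccessUserSeatParams{
+//		SeatUID:     user.SeatUID,
+//		AccessSeat:  &seatOff, // release the Access seat
+//		GatewaySeat: &seatOff, // release the Gateway seat
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}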
+// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-seats-update-a-user-seat +func (api *API) UpdateAccessUserSeat(ctx context.Context, rc *ResourceContainer, params UpdateAccessUserSeatParams) ([]AccessUpdateAccessUserSeatResult, error) { + if rc.Level != AccountRouteLevel { + return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if params.SeatUID == "" { + return []AccessUpdateAccessUserSeatResult{}, errMissingAccessSeatUID + } + + uri := fmt.Sprintf( + "/%s/%s/access/seats", + rc.Level, + rc.Identifier, + ) + + // this requests expects an array of params, but this method only accepts a single param + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, []UpdateAccessUserSeatParams{params}) + if err != nil { + return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var updateAccessUserSeatResponse UpdateAccessUserSeatResponse + err = json.Unmarshal(res, &updateAccessUserSeatResponse) + if err != nil { + return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return updateAccessUserSeatResponse.Result, nil +} + +// UpdateAccessUsersSeats updates many Access User Seats. +// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-seats-update-a-user-seat +func (api *API) UpdateAccessUsersSeats(ctx context.Context, rc *ResourceContainer, params UpdateAccessUsersSeatsParams) ([]AccessUpdateAccessUserSeatResult, error) { + if rc.Level != AccountRouteLevel { + return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + for _, param := range params { + if param.SeatUID == "" { + return []AccessUpdateAccessUserSeatResult{}, errMissingAccessSeatUID + } + } + + uri := fmt.Sprintf( + "/%s/%s/access/seats", + rc.Level, + rc.Identifier, + ) + + // this requests expects an array of params, but this method only accepts a single param + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var updateAccessUserSeatResponse UpdateAccessUserSeatResponse + err = json.Unmarshal(res, &updateAccessUserSeatResponse) + if err != nil { + return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return updateAccessUserSeatResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_service_tokens.go b/vendor/github.com/cloudflare/cloudflare-go/access_service_tokens.go new file mode 100644 index 0000000..5202b24 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_service_tokens.go @@ -0,0 +1,257 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingServiceTokenUUID = errors.New("missing required service token UUID") +) + +// AccessServiceToken represents an Access Service Token. +type AccessServiceToken struct { + ClientID string `json:"client_id"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *time.Time `json:"expires_at"` + ID string `json:"id"` + Name string `json:"name"` + UpdatedAt *time.Time `json:"updated_at"` + Duration string `json:"duration,omitempty"` +} + +// AccessServiceTokenUpdateResponse represents the response from the API +// when a new Service Token is updated. 
This base struct is also used in the +// Create as they are very similar responses. +type AccessServiceTokenUpdateResponse struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + ExpiresAt *time.Time `json:"expires_at"` + ID string `json:"id"` + Name string `json:"name"` + ClientID string `json:"client_id"` + Duration string `json:"duration,omitempty"` +} + +// AccessServiceTokenRefreshResponse represents the response from the API +// when an existing service token is refreshed to last longer. +type AccessServiceTokenRefreshResponse struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + ExpiresAt *time.Time `json:"expires_at"` + ID string `json:"id"` + Name string `json:"name"` + ClientID string `json:"client_id"` + Duration string `json:"duration,omitempty"` +} + +// AccessServiceTokenCreateResponse is the same API response as the Update +// operation with the exception that the `ClientSecret` is present in a +// Create operation. +type AccessServiceTokenCreateResponse struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + ExpiresAt *time.Time `json:"expires_at"` + ID string `json:"id"` + Name string `json:"name"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Duration string `json:"duration,omitempty"` +} + +// AccessServiceTokenRotateResponse is the same API response as the Create +// operation. +type AccessServiceTokenRotateResponse struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + ExpiresAt *time.Time `json:"expires_at"` + ID string `json:"id"` + Name string `json:"name"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Duration string `json:"duration,omitempty"` +} + +// AccessServiceTokensListResponse represents the response from the list +// Access Service Tokens endpoint. +type AccessServiceTokensListResponse struct { + Result []AccessServiceToken `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessServiceTokensDetailResponse is the API response, containing a single +// Access Service Token. +type AccessServiceTokensDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessServiceToken `json:"result"` +} + +// AccessServiceTokensCreationDetailResponse is the API response, containing a +// single Access Service Token. +type AccessServiceTokensCreationDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessServiceTokenCreateResponse `json:"result"` +} + +// AccessServiceTokensUpdateDetailResponse is the API response, containing a +// single Access Service Token. +type AccessServiceTokensUpdateDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessServiceTokenUpdateResponse `json:"result"` +} + +// AccessServiceTokensRefreshDetailResponse is the API response, containing a +// single Access Service Token. +type AccessServiceTokensRefreshDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessServiceTokenRefreshResponse `json:"result"` +} + +// AccessServiceTokensRotateSecretDetailResponse is the API response, containing a +// single Access Service Token. 
+type AccessServiceTokensRotateSecretDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccessServiceTokenRotateResponse `json:"result"` +} + +type ListAccessServiceTokensParams struct{} + +type CreateAccessServiceTokenParams struct { + Name string `json:"name"` + Duration string `json:"duration,omitempty"` +} + +type UpdateAccessServiceTokenParams struct { + Name string `json:"name"` + UUID string `json:"-"` + Duration string `json:"duration,omitempty"` +} + +func (api *API) ListAccessServiceTokens(ctx context.Context, rc *ResourceContainer, params ListAccessServiceTokensParams) ([]AccessServiceToken, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/access/service_tokens", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessServiceToken{}, ResultInfo{}, err + } + + var accessServiceTokensListResponse AccessServiceTokensListResponse + err = json.Unmarshal(res, &accessServiceTokensListResponse) + if err != nil { + return []AccessServiceToken{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessServiceTokensListResponse.Result, accessServiceTokensListResponse.ResultInfo, nil +} + +func (api *API) CreateAccessServiceToken(ctx context.Context, rc *ResourceContainer, params CreateAccessServiceTokenParams) (AccessServiceTokenCreateResponse, error) { + uri := fmt.Sprintf("/%s/%s/access/service_tokens", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + + if err != nil { + return AccessServiceTokenCreateResponse{}, err + } + + var accessServiceTokenCreation AccessServiceTokensCreationDetailResponse + err = json.Unmarshal(res, &accessServiceTokenCreation) + if err != nil { + return AccessServiceTokenCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessServiceTokenCreation.Result, nil +} + +func (api *API) UpdateAccessServiceToken(ctx context.Context, rc *ResourceContainer, params UpdateAccessServiceTokenParams) (AccessServiceTokenUpdateResponse, error) { + if params.UUID == "" { + return AccessServiceTokenUpdateResponse{}, ErrMissingServiceTokenUUID + } + + uri := fmt.Sprintf("/%s/%s/access/service_tokens/%s", rc.Level, rc.Identifier, params.UUID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AccessServiceTokenUpdateResponse{}, err + } + + var accessServiceTokenUpdate AccessServiceTokensUpdateDetailResponse + err = json.Unmarshal(res, &accessServiceTokenUpdate) + if err != nil { + return AccessServiceTokenUpdateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessServiceTokenUpdate.Result, nil +} + +func (api *API) DeleteAccessServiceToken(ctx context.Context, rc *ResourceContainer, uuid string) (AccessServiceTokenUpdateResponse, error) { + uri := fmt.Sprintf("/%s/%s/access/service_tokens/%s", rc.Level, rc.Identifier, uuid) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return AccessServiceTokenUpdateResponse{}, err + } + + var accessServiceTokenUpdate AccessServiceTokensUpdateDetailResponse + err = json.Unmarshal(res, &accessServiceTokenUpdate) + if err != nil { + return AccessServiceTokenUpdateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessServiceTokenUpdate.Result, nil +} + +// RefreshAccessServiceToken updates the expiry of an Access Service Token +// in place. 
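+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx) and the ID of an existing service token; note that
+// only the create and rotate responses carry the client secret:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	refreshed, err := api.RefreshAccessServiceToken(ctx, rc, tokenID)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(refreshed.ExpiresAt)
+//
+//	rotated, err := api.RotateAccessServiceToken(ctx, rc, tokenID)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(rotated.ClientSecret) // new secret, returned only on create/rotate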
+// +// API reference: https://api.cloudflare.com/#access-service-tokens-refresh-a-service-token +func (api *API) RefreshAccessServiceToken(ctx context.Context, rc *ResourceContainer, id string) (AccessServiceTokenRefreshResponse, error) { + uri := fmt.Sprintf("/%s/%s/access/service_tokens/%s/refresh", rc.Level, rc.Identifier, id) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return AccessServiceTokenRefreshResponse{}, err + } + + var accessServiceTokenRefresh AccessServiceTokensRefreshDetailResponse + err = json.Unmarshal(res, &accessServiceTokenRefresh) + if err != nil { + return AccessServiceTokenRefreshResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessServiceTokenRefresh.Result, nil +} + +// RotateAccessServiceToken rotates the client secret of an Access Service +// Token in place. +// API reference: https://api.cloudflare.com/#access-service-tokens-rotate-a-service-token +func (api *API) RotateAccessServiceToken(ctx context.Context, rc *ResourceContainer, id string) (AccessServiceTokenRotateResponse, error) { + uri := fmt.Sprintf("/%s/%s/access/service_tokens/%s/rotate", rc.Level, rc.Identifier, id) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return AccessServiceTokenRotateResponse{}, err + } + + var accessServiceTokenRotate AccessServiceTokensRotateSecretDetailResponse + err = json.Unmarshal(res, &accessServiceTokenRotate) + if err != nil { + return AccessServiceTokenRotateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accessServiceTokenRotate.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_tag.go b/vendor/github.com/cloudflare/cloudflare-go/access_tag.go new file mode 100644 index 0000000..9bba613 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_tag.go @@ -0,0 +1,85 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type AccessTag struct { + Name string `json:"name,omitempty"` + AppCount int `json:"app_count,omitempty"` +} + +type AccessTagListResponse struct { + Response + Result []AccessTag `json:"result"` + ResultInfo `json:"result_info"` +} + +type AccessTagResponse struct { + Response + Result AccessTag `json:"result"` +} + +type ListAccessTagsParams struct{} + +type CreateAccessTagParams struct { + Name string `json:"name,omitempty"` +} + +func (api *API) ListAccessTags(ctx context.Context, rc *ResourceContainer, params ListAccessTagsParams) ([]AccessTag, error) { + uri := buildURI(fmt.Sprintf("/%s/%s/access/tags", rc.Level, rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessTag{}, err + } + + var TagsResponse AccessTagListResponse + err = json.Unmarshal(res, &TagsResponse) + if err != nil { + return []AccessTag{}, err + } + return TagsResponse.Result, nil +} + +func (api *API) GetAccessTag(ctx context.Context, rc *ResourceContainer, tagName string) (AccessTag, error) { + uri := fmt.Sprintf("/%s/%s/access/tags/%s", rc.Level, rc.Identifier, tagName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccessTag{}, err + } + + var TagResponse AccessTagResponse + err = json.Unmarshal(res, &TagResponse) + if err != nil { + return AccessTag{}, err + } + return TagResponse.Result, nil +} + +func (api *API) CreateAccessTag(ctx context.Context, rc *ResourceContainer, params CreateAccessTagParams) (AccessTag, error) { + uri 
:= fmt.Sprintf("/%s/%s/access/tags", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AccessTag{}, err + } + + var TagResponse AccessTagResponse + err = json.Unmarshal(res, &TagResponse) + if err != nil { + return AccessTag{}, err + } + return TagResponse.Result, nil +} + +func (api *API) DeleteAccessTag(ctx context.Context, rc *ResourceContainer, tagName string) error { + uri := fmt.Sprintf("/%s/%s/access/tags/%s", rc.Level, rc.Identifier, tagName) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_user_tokens.go b/vendor/github.com/cloudflare/cloudflare-go/access_user_tokens.go new file mode 100644 index 0000000..ba32d7f --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_user_tokens.go @@ -0,0 +1,24 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" +) + +type RevokeAccessUserTokensParams struct { + Email string `json:"email"` +} + +// RevokeAccessUserTokens revokes any outstanding tokens issued for a specific user +// Access User. +func (api *API) RevokeAccessUserTokens(ctx context.Context, rc *ResourceContainer, params RevokeAccessUserTokensParams) error { + uri := fmt.Sprintf("/%s/%s/access/organizations/revoke_user", rc.Level, rc.Identifier) + + _, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_users.go b/vendor/github.com/cloudflare/cloudflare-go/access_users.go new file mode 100644 index 0000000..233ceb2 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/access_users.go @@ -0,0 +1,362 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type AccessUserActiveSessionsResponse struct { + Result []AccessUserActiveSessionResult `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +type AccessUserActiveSessionResult struct { + Expiration int64 `json:"expiration"` + Metadata AccessUserActiveSessionMetadata `json:"metadata"` + Name string `json:"name"` +} + +type AccessUserActiveSessionMetadata struct { + Apps map[string]AccessUserActiveSessionMetadataApp `json:"apps"` + Expires int64 `json:"expires"` + IAT int64 `json:"iat"` + Nonce string `json:"nonce"` + TTL int64 `json:"ttl"` +} + +type AccessUserActiveSessionMetadataApp struct { + Hostname string `json:"hostname"` + Name string `json:"name"` + Type string `json:"type"` + UID string `json:"uid"` +} + +type AccessUserDevicePosture struct { + Check AccessUserDevicePostureCheck `json:"check"` + Data map[string]interface{} `json:"data"` + Description string `json:"description"` + Error string `json:"error"` + ID string `json:"id"` + RuleName string `json:"rule_name"` + Success *bool `json:"success"` + Timestamp string `json:"timestamp"` + Type string `json:"type"` +} + +type AccessUserDeviceSession struct { + LastAuthenticated int64 `json:"last_authenticated"` +} + +type AccessUserFailedLoginsResponse struct { + Result []AccessUserFailedLoginResult `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +type AccessUserFailedLoginResult struct { + Expiration int64 `json:"expiration"` + Metadata AccessUserFailedLoginMetadata `json:"metadata"` +} + +type AccessUserFailedLoginMetadata struct { + AppName string `json:"app_name"` + Aud string `json:"aud"` + Datetime string 
`json:"datetime"` + RayID string `json:"ray_id"` + UserEmail string `json:"user_email"` + UserUUID string `json:"user_uuid"` +} + +type AccessUserLastSeenIdentityResponse struct { + Result AccessUserLastSeenIdentityResult `json:"result"` + ResultInfo ResultInfo `json:"result_info"` + Response +} + +type AccessUserLastSeenIdentityResult struct { + AccountID string `json:"account_id"` + AuthStatus string `json:"auth_status"` + CommonName string `json:"common_name"` + DeviceID string `json:"device_id"` + DevicePosture map[string]AccessUserDevicePosture `json:"devicePosture"` + DeviceSessions map[string]AccessUserDeviceSession `json:"device_sessions"` + Email string `json:"email"` + Geo AccessUserIdentityGeo `json:"geo"` + IAT int64 `json:"iat"` + IDP AccessUserIDP `json:"idp"` + IP string `json:"ip"` + IsGateway *bool `json:"is_gateway"` + IsWarp *bool `json:"is_warp"` + MtlsAuth AccessUserMTLSAuth `json:"mtls_auth"` + ServiceTokenID string `json:"service_token_id"` + ServiceTokenStatus *bool `json:"service_token_status"` + UserUUID string `json:"user_uuid"` + Version int `json:"version"` +} + +type AccessUserLastSeenIdentitySessionResponse struct { + Result GetAccessUserLastSeenIdentityResult `json:"result"` + ResultInfo ResultInfo `json:"result_info"` + Response +} + +type GetAccessUserLastSeenIdentityResult struct { + AccountID string `json:"account_id"` + AuthStatus string `json:"auth_status"` + CommonName string `json:"common_name"` + DevicePosture map[string]AccessUserDevicePosture `json:"devicePosture"` + DeviceSessions map[string]AccessUserDeviceSession `json:"device_sessions"` + DeviceID string `json:"device_id"` + Email string `json:"email"` + Geo AccessUserIdentityGeo `json:"geo"` + IAT int64 `json:"iat"` + IDP AccessUserIDP `json:"idp"` + IP string `json:"ip"` + IsGateway *bool `json:"is_gateway"` + IsWarp *bool `json:"is_warp"` + MtlsAuth AccessUserMTLSAuth `json:"mtls_auth"` + ServiceTokenID string `json:"service_token_id"` + ServiceTokenStatus *bool `json:"service_token_status"` + UserUUID string `json:"user_uuid"` + Version int `json:"version"` +} + +type AccessUserDevicePostureCheck struct { + Exists *bool `json:"exists"` + Path string `json:"path"` +} + +type AccessUserIdentityGeo struct { + Country string `json:"country"` +} + +type AccessUserIDP struct { + ID string `json:"id"` + Type string `json:"type"` +} + +type AccessUserMTLSAuth struct { + AuthStatus string `json:"auth_status"` + CertIssuerDN string `json:"cert_issuer_dn"` + CertIssuerSKI string `json:"cert_issuer_ski"` + CertPresented *bool `json:"cert_presented"` + CertSerial string `json:"cert_serial"` +} + +type AccessUserListResponse struct { + Result []AccessUser `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +type AccessUser struct { + ID string `json:"id"` + AccessSeat *bool `json:"access_seat"` + ActiveDeviceCount int `json:"active_device_count"` + CreatedAt string `json:"created_at"` + Email string `json:"email"` + GatewaySeat *bool `json:"gateway_seat"` + LastSuccessfulLogin string `json:"last_successful_login"` + Name string `json:"name"` + SeatUID string `json:"seat_uid"` + UID string `json:"uid"` + UpdatedAt string `json:"updated_at"` +} + +type AccessUserParams struct { + ResultInfo +} + +type GetAccessUserSingleActiveSessionResponse struct { + Result GetAccessUserSingleActiveSessionResult `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +type GetAccessUserSingleActiveSessionResult struct { + AccountID string `json:"account_id"` + AuthStatus string 
`json:"auth_status"` + CommonName string `json:"common_name"` + DevicePosture map[string]AccessUserDevicePosture `json:"devicePosture"` + DeviceSessions map[string]AccessUserDeviceSession `json:"device_sessions"` + DeviceID string `json:"device_id"` + Email string `json:"email"` + Geo AccessUserIdentityGeo `json:"geo"` + IAT int64 `json:"iat"` + IDP AccessUserIDP `json:"idp"` + IP string `json:"ip"` + IsGateway *bool `json:"is_gateway"` + IsWarp *bool `json:"is_warp"` + MtlsAuth AccessUserMTLSAuth `json:"mtls_auth"` + ServiceTokenID string `json:"service_token_id"` + ServiceTokenStatus *bool `json:"service_token_status"` + UserUUID string `json:"user_uuid"` + Version int `json:"version"` + IsActive *bool `json:"isActive"` +} + +// ListAccessUsers returns a list of users for a single cloudflare access/zerotrust account. +// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-users-get-users +func (api *API) ListAccessUsers(ctx context.Context, rc *ResourceContainer, params AccessUserParams) ([]AccessUser, *ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return []AccessUser{}, &ResultInfo{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + baseURL := fmt.Sprintf("/%s/%s/access/users", rc.Level, rc.Identifier) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var accessUsers []AccessUser + var resultInfo *ResultInfo = nil + + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessUser{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + var r AccessUserListResponse + resultInfo = &r.ResultInfo + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccessUser{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + accessUsers = append(accessUsers, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return accessUsers, resultInfo, nil +} + +// GetAccessUserActiveSessions returns a list of active sessions for an user. +// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-users-get-active-sessions +func (api *API) GetAccessUserActiveSessions(ctx context.Context, rc *ResourceContainer, userID string) ([]AccessUserActiveSessionResult, error) { + if rc.Level != AccountRouteLevel { + return []AccessUserActiveSessionResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf( + "/%s/%s/access/users/%s/active_sessions", + rc.Level, + rc.Identifier, + userID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessUserActiveSessionResult{}, err + } + + var accessUserActiveSessionsResponse AccessUserActiveSessionsResponse + err = json.Unmarshal(res, &accessUserActiveSessionsResponse) + if err != nil { + return []AccessUserActiveSessionResult{}, err + } + return accessUserActiveSessionsResponse.Result, nil +} + +// GetAccessUserSingleActiveSession returns a single active session for a user. 
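+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx) and a caller-supplied accountID; both calls are
+// account-scoped:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	users, _, err := api.ListAccessUsers(ctx, rc, cloudflare.AccessUserParams{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, u := range users {
+//		sessions, err := api.GetAccessUserActiveSessions(ctx, rc, u.ID)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(u.Email, len(sessions))
+//	}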
+// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-users-get-active-session +func (api *API) GetAccessUserSingleActiveSession(ctx context.Context, rc *ResourceContainer, userID string, sessionID string) (GetAccessUserSingleActiveSessionResult, error) { + if rc.Level != AccountRouteLevel { + return GetAccessUserSingleActiveSessionResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf( + "/%s/%s/access/users/%s/active_sessions/%s", + rc.Level, + rc.Identifier, + userID, + sessionID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return GetAccessUserSingleActiveSessionResult{}, err + } + + var accessUserActiveSingleSessionsResponse GetAccessUserSingleActiveSessionResponse + err = json.Unmarshal(res, &accessUserActiveSingleSessionsResponse) + if err != nil { + return GetAccessUserSingleActiveSessionResult{}, err + } + return accessUserActiveSingleSessionsResponse.Result, nil +} + +// GetAccessUserFailedLogins returns a list of failed logins for a user. +// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-users-get-failed-logins +func (api *API) GetAccessUserFailedLogins(ctx context.Context, rc *ResourceContainer, userID string) ([]AccessUserFailedLoginResult, error) { + if rc.Level != AccountRouteLevel { + return []AccessUserFailedLoginResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf( + "/%s/%s/access/users/%s/failed_logins", + rc.Level, + rc.Identifier, + userID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccessUserFailedLoginResult{}, err + } + + var accessUserFailedLoginsResponse AccessUserFailedLoginsResponse + err = json.Unmarshal(res, &accessUserFailedLoginsResponse) + if err != nil { + return []AccessUserFailedLoginResult{}, err + } + return accessUserFailedLoginsResponse.Result, nil +} + +// GetAccessUserLastSeenIdentity returns the last seen identity for a user. +// +// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-users-get-last-seen-identity +func (api *API) GetAccessUserLastSeenIdentity(ctx context.Context, rc *ResourceContainer, userID string) (GetAccessUserLastSeenIdentityResult, error) { + if rc.Level != AccountRouteLevel { + return GetAccessUserLastSeenIdentityResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf( + "/%s/%s/access/users/%s/last_seen_identity", + rc.Level, + rc.Identifier, + userID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return GetAccessUserLastSeenIdentityResult{}, err + } + + var accessUserLastSeenIdentityResponse AccessUserLastSeenIdentitySessionResponse + err = json.Unmarshal(res, &accessUserLastSeenIdentityResponse) + if err != nil { + return GetAccessUserLastSeenIdentityResult{}, err + } + return accessUserLastSeenIdentityResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/account_members.go b/vendor/github.com/cloudflare/cloudflare-go/account_members.go new file mode 100644 index 0000000..d6ecab5 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/account_members.go @@ -0,0 +1,245 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// AccountMember is the definition of a member of an account. 
+type AccountMember struct { + ID string `json:"id"` + Code string `json:"code"` + User AccountMemberUserDetails `json:"user"` + Status string `json:"status"` + Roles []AccountRole `json:"roles,omitempty"` + Policies []Policy `json:"policies,omitempty"` +} + +// AccountMemberUserDetails outlines all the personal information about +// a member. +type AccountMemberUserDetails struct { + ID string `json:"id"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Email string `json:"email"` + TwoFactorAuthenticationEnabled bool `json:"two_factor_authentication_enabled"` +} + +// AccountMembersListResponse represents the response from the list +// account members endpoint. +type AccountMembersListResponse struct { + Result []AccountMember `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccountMemberDetailResponse is the API response, containing a single +// account member. +type AccountMemberDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccountMember `json:"result"` +} + +// AccountMemberInvitation represents the invitation for a new member to +// the account. +type AccountMemberInvitation struct { + Email string `json:"email"` + Roles []string `json:"roles,omitempty"` + Policies []Policy `json:"policies,omitempty"` + Status string `json:"status,omitempty"` +} + +const errMissingMemberRolesOrPolicies = "account member must be created with roles or policies (not both)" + +var ErrMissingMemberRolesOrPolicies = errors.New(errMissingMemberRolesOrPolicies) + +type CreateAccountMemberParams struct { + EmailAddress string + Roles []string + Policies []Policy + Status string +} + +// AccountMembers returns all members of an account. +// +// API reference: https://api.cloudflare.com/#accounts-list-accounts +func (api *API) AccountMembers(ctx context.Context, accountID string, pageOpts PaginationOptions) ([]AccountMember, ResultInfo, error) { + if accountID == "" { + return []AccountMember{}, ResultInfo{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/members", accountID), pageOpts) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccountMember{}, ResultInfo{}, err + } + + var accountMemberListresponse AccountMembersListResponse + err = json.Unmarshal(res, &accountMemberListresponse) + if err != nil { + return []AccountMember{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accountMemberListresponse.Result, accountMemberListresponse.ResultInfo, nil +} + +// CreateAccountMemberWithStatus invites a new member to join an account, allowing setting the status. +// +// Refer to the API reference for valid statuses. +// +// Deprecated: Use `CreateAccountMember` with a `Status` field instead. +// +// API reference: https://api.cloudflare.com/#account-members-add-member +func (api *API) CreateAccountMemberWithStatus(ctx context.Context, accountID string, emailAddress string, roles []string, status string) (AccountMember, error) { + return api.CreateAccountMember(ctx, AccountIdentifier(accountID), CreateAccountMemberParams{ + EmailAddress: emailAddress, + Roles: roles, + Status: status, + }) +} + +// CreateAccountMember invites a new member to join an account with roles. +// The member will be placed into "pending" status and receive an email confirmation. 
+// NOTE: If you are currently enrolled in Domain Scoped Roles, your roles will +// be converted to policies upon member invitation. +// +// API reference: https://api.cloudflare.com/#account-members-add-member +func (api *API) CreateAccountMember(ctx context.Context, rc *ResourceContainer, params CreateAccountMemberParams) (AccountMember, error) { + if rc.Level != AccountRouteLevel { + return AccountMember{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return AccountMember{}, ErrMissingAccountID + } + + invite := AccountMemberInvitation{ + Email: params.EmailAddress, + Status: params.Status, + } + + roles := []AccountRole{} + for i := 0; i < len(params.Roles); i++ { + roles = append(roles, AccountRole{ID: params.Roles[i]}) + } + err := validateRolesAndPolicies(roles, params.Policies) + if err != nil { + return AccountMember{}, err + } + + if params.Roles != nil { + invite.Roles = params.Roles + } else if params.Policies != nil { + invite.Policies = params.Policies + } + + uri := fmt.Sprintf("/accounts/%s/members", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, invite) + if err != nil { + return AccountMember{}, err + } + + var accountMemberListResponse AccountMemberDetailResponse + err = json.Unmarshal(res, &accountMemberListResponse) + if err != nil { + return AccountMember{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accountMemberListResponse.Result, nil +} + +// DeleteAccountMember removes a member from an account. +// +// API reference: https://api.cloudflare.com/#account-members-remove-member +func (api *API) DeleteAccountMember(ctx context.Context, accountID string, userID string) error { + if accountID == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/members/%s", accountID, userID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// UpdateAccountMember modifies an existing account member. +// +// API reference: https://api.cloudflare.com/#account-members-update-member +func (api *API) UpdateAccountMember(ctx context.Context, accountID string, userID string, member AccountMember) (AccountMember, error) { + if accountID == "" { + return AccountMember{}, ErrMissingAccountID + } + + err := validateRolesAndPolicies(member.Roles, member.Policies) + if err != nil { + return AccountMember{}, err + } + + uri := fmt.Sprintf("/accounts/%s/members/%s", accountID, userID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, member) + if err != nil { + return AccountMember{}, err + } + + var accountMemberListResponse AccountMemberDetailResponse + err = json.Unmarshal(res, &accountMemberListResponse) + if err != nil { + return AccountMember{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accountMemberListResponse.Result, nil +} + +// AccountMember returns details of a single account member. 
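+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx) and a role ID obtained from ListAccountRoles; roles
+// and policies are mutually exclusive in the invitation:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	member, err := api.CreateAccountMember(ctx, rc, cloudflare.CreateAccountMemberParams{
+//		EmailAddress: "new.user@example.com",
+//		Roles:        []string{roleID},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	details, err := api.AccountMember(ctx, accountID, member.ID)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(details.Status)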
+// +// API reference: https://api.cloudflare.com/#account-members-member-details +func (api *API) AccountMember(ctx context.Context, accountID string, memberID string) (AccountMember, error) { + if accountID == "" { + return AccountMember{}, ErrMissingAccountID + } + + uri := fmt.Sprintf( + "/accounts/%s/members/%s", + accountID, + memberID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccountMember{}, err + } + + var accountMemberResponse AccountMemberDetailResponse + err = json.Unmarshal(res, &accountMemberResponse) + if err != nil { + return AccountMember{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accountMemberResponse.Result, nil +} + +// validateRolesAndPolicies ensures either roles or policies are provided in +// CreateAccountMember requests, but not both. +func validateRolesAndPolicies(roles []AccountRole, policies []Policy) error { + hasRoles := len(roles) > 0 + hasPolicies := len(policies) > 0 + hasRolesOrPolicies := hasRoles || hasPolicies + hasRolesAndPolicies := hasRoles && hasPolicies + hasCorrectPermissions := hasRolesOrPolicies && !hasRolesAndPolicies + if !hasCorrectPermissions { + return ErrMissingMemberRolesOrPolicies + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/account_roles.go b/vendor/github.com/cloudflare/cloudflare-go/account_roles.go new file mode 100644 index 0000000..d216ae3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/account_roles.go @@ -0,0 +1,111 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// AccountRole defines the roles that a member can have attached. +type AccountRole struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Permissions map[string]AccountRolePermission `json:"permissions"` +} + +// AccountRolePermission is the shared structure for all permissions +// that can be assigned to a member. +type AccountRolePermission struct { + Read bool `json:"read"` + Edit bool `json:"edit"` +} + +// AccountRolesListResponse represents the list response from the +// account roles. +type AccountRolesListResponse struct { + Result []AccountRole `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccountRoleDetailResponse is the API response, containing a single +// account role. +type AccountRoleDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result AccountRole `json:"result"` +} + +type ListAccountRolesParams struct { + ResultInfo +} + +// ListAccountRoles returns all roles of an account. 
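+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx) and a caller-supplied accountID; an empty params
+// struct auto-paginates through all roles:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	roles, err := api.ListAccountRoles(ctx, rc, cloudflare.ListAccountRolesParams{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, role := range roles {
+//		fmt.Println(role.ID, role.Name)
+//	}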
+// +// API reference: https://developers.cloudflare.com/api/operations/account-roles-list-roles +func (api *API) ListAccountRoles(ctx context.Context, rc *ResourceContainer, params ListAccountRolesParams) ([]AccountRole, error) { + if rc.Identifier == "" { + return []AccountRole{}, ErrMissingAccountID + } + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + var roles []AccountRole + var r AccountRolesListResponse + for { + uri := buildURI(fmt.Sprintf("/accounts/%s/roles", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AccountRole{}, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []AccountRole{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + roles = append(roles, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return roles, nil +} + +// GetAccountRole returns the details of a single account role. +// +// API reference: https://developers.cloudflare.com/api/operations/account-roles-role-details +func (api *API) GetAccountRole(ctx context.Context, rc *ResourceContainer, roleID string) (AccountRole, error) { + if rc.Identifier == "" { + return AccountRole{}, ErrMissingAccountID + } + uri := fmt.Sprintf("/accounts/%s/roles/%s", rc.Identifier, roleID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AccountRole{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var accountRole AccountRoleDetailResponse + err = json.Unmarshal(res, &accountRole) + if err != nil { + return AccountRole{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accountRole.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/accounts.go b/vendor/github.com/cloudflare/cloudflare-go/accounts.go new file mode 100644 index 0000000..aee3c6a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/accounts.go @@ -0,0 +1,151 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AccountSettings outlines the available options for an account. +type AccountSettings struct { + EnforceTwoFactor bool `json:"enforce_twofactor"` +} + +// Account represents the root object that owns resources. +type Account struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + CreatedOn time.Time `json:"created_on,omitempty"` + Settings *AccountSettings `json:"settings,omitempty"` +} + +// AccountResponse represents the response from the accounts endpoint for a +// single account ID. +type AccountResponse struct { + Result Account `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccountListResponse represents the response from the list accounts endpoint. +type AccountListResponse struct { + Result []Account `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccountDetailResponse is the API response, containing a single Account. +type AccountDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result Account `json:"result"` +} + +// AccountsListParams holds the filterable options for Accounts. 
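+//
+// A minimal usage sketch, assuming an initialized *API client (api) and a
+// context.Context (ctx); the Name filter is optional:
+//
+//	accounts, _, err := api.Accounts(ctx, cloudflare.AccountsListParams{Name: "Example Corp"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, account := range accounts {
+//		fmt.Println(account.ID, account.Name)
+//	}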
+type AccountsListParams struct { + Name string `url:"name,omitempty"` + + PaginationOptions +} + +// Accounts returns all accounts the logged in user has access to. +// +// API reference: https://api.cloudflare.com/#accounts-list-accounts +func (api *API) Accounts(ctx context.Context, params AccountsListParams) ([]Account, ResultInfo, error) { + res, err := api.makeRequestContext(ctx, http.MethodGet, buildURI("/accounts", params), nil) + if err != nil { + return []Account{}, ResultInfo{}, err + } + + var accListResponse AccountListResponse + err = json.Unmarshal(res, &accListResponse) + if err != nil { + return []Account{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return accListResponse.Result, accListResponse.ResultInfo, nil +} + +// Account returns a single account based on the ID. +// +// API reference: https://api.cloudflare.com/#accounts-account-details +func (api *API) Account(ctx context.Context, accountID string) (Account, ResultInfo, error) { + uri := fmt.Sprintf("/accounts/%s", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Account{}, ResultInfo{}, err + } + + var accResponse AccountResponse + err = json.Unmarshal(res, &accResponse) + if err != nil { + return Account{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return accResponse.Result, accResponse.ResultInfo, nil +} + +// UpdateAccount allows management of an account using the account ID. +// +// API reference: https://api.cloudflare.com/#accounts-update-account +func (api *API) UpdateAccount(ctx context.Context, accountID string, account Account) (Account, error) { + uri := fmt.Sprintf("/accounts/%s", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, account) + if err != nil { + return Account{}, err + } + + var a AccountDetailResponse + err = json.Unmarshal(res, &a) + if err != nil { + return Account{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return a.Result, nil +} + +// CreateAccount creates a new account. Note: This requires the Tenant +// entitlement. +// +// API reference: https://developers.cloudflare.com/tenant/tutorial/provisioning-resources#creating-an-account +func (api *API) CreateAccount(ctx context.Context, account Account) (Account, error) { + uri := "/accounts" + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, account) + if err != nil { + return Account{}, err + } + + var a AccountDetailResponse + err = json.Unmarshal(res, &a) + if err != nil { + return Account{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return a.Result, nil +} + +// DeleteAccount removes an account. Note: This requires the Tenant +// entitlement. 
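+//
+// A minimal usage sketch, assuming an initialized *API client (api) carrying
+// the Tenant entitlement and a context.Context (ctx):
+//
+//	created, err := api.CreateAccount(ctx, cloudflare.Account{Name: "Customer A"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := api.DeleteAccount(ctx, created.ID); err != nil {
+//		log.Fatal(err)
+//	}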
+// +// API reference: https://developers.cloudflare.com/tenant/tutorial/provisioning-resources#optional-deleting-accounts +func (api *API) DeleteAccount(ctx context.Context, accountID string) error { + if accountID == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s", accountID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/addressing_address_map.go b/vendor/github.com/cloudflare/cloudflare-go/addressing_address_map.go new file mode 100644 index 0000000..0109e79 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/addressing_address_map.go @@ -0,0 +1,285 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AddressMap contains information about an address map. +type AddressMap struct { + ID string `json:"id"` + Description *string `json:"description,omitempty"` + DefaultSNI *string `json:"default_sni"` + Enabled *bool `json:"enabled"` + Deletable *bool `json:"can_delete"` + CanModifyIPs *bool `json:"can_modify_ips"` + Memberships []AddressMapMembership `json:"memberships"` + IPs []AddressMapIP `json:"ips"` + CreatedAt time.Time `json:"created_at"` + ModifiedAt time.Time `json:"modified_at"` +} + +type AddressMapIP struct { + IP string `json:"ip"` + CreatedAt time.Time `json:"created_at"` +} + +type AddressMapMembershipContainer struct { + Identifier string `json:"identifier"` + Kind AddressMapMembershipKind `json:"kind"` +} + +type AddressMapMembership struct { + Identifier string `json:"identifier"` + Kind AddressMapMembershipKind `json:"kind"` + Deletable *bool `json:"can_delete"` + CreatedAt time.Time `json:"created_at"` +} + +func (ammb *AddressMapMembershipContainer) URLFragment() string { + switch ammb.Kind { + case AddressMapMembershipAccount: + return fmt.Sprintf("accounts/%s", ammb.Identifier) + case AddressMapMembershipZone: + return fmt.Sprintf("zones/%s", ammb.Identifier) + default: + return fmt.Sprintf("%s/%s", ammb.Kind, ammb.Identifier) + } +} + +type AddressMapMembershipKind string + +const ( + AddressMapMembershipZone AddressMapMembershipKind = "zone" + AddressMapMembershipAccount AddressMapMembershipKind = "account" +) + +// ListAddressMapResponse contains a slice of address maps. +type ListAddressMapResponse struct { + Response + Result []AddressMap `json:"result"` +} + +// GetAddressMapResponse contains a specific address map's API Response. +type GetAddressMapResponse struct { + Response + Result AddressMap `json:"result"` +} + +// CreateAddressMapParams contains information about an address map to be created. +type CreateAddressMapParams struct { + Description *string `json:"description"` + Enabled *bool `json:"enabled"` + IPs []string `json:"ips"` + Memberships []AddressMapMembershipContainer `json:"memberships"` +} + +// UpdateAddressMapParams contains information about an address map to be updated. +type UpdateAddressMapParams struct { + ID string `json:"-"` + Description *string `json:"description,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + DefaultSNI *string `json:"default_sni,omitempty"` +} + +// AddressMapFilter contains filter parameters for finding a list of address maps. +type ListAddressMapsParams struct { + IP *string `url:"ip,omitempty"` + CIDR *string `url:"cidr,omitempty"` +} + +// CreateIPAddressToAddressMapParams contains request parameters to add/remove IP address to/from an address map. 
+type CreateIPAddressToAddressMapParams struct { + // ID represents the target address map for adding the IP address. + ID string + // The IP address. + IP string +} + +// CreateMembershipToAddressMapParams contains request parameters to add/remove membership from an address map. +type CreateMembershipToAddressMapParams struct { + // ID represents the target address map for adding the membershp. + ID string + Membership AddressMapMembershipContainer +} + +type DeleteMembershipFromAddressMapParams struct { + // ID represents the target address map for removing the IP address. + ID string + Membership AddressMapMembershipContainer +} + +type DeleteIPAddressFromAddressMapParams struct { + // ID represents the target address map for adding the membershp. + ID string + IP string +} + +// ListAddressMaps lists all address maps owned by the account. +// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-list-address-maps +func (api *API) ListAddressMaps(ctx context.Context, rc *ResourceContainer, params ListAddressMapsParams) ([]AddressMap, error) { + if rc.Level != AccountRouteLevel { + return []AddressMap{}, ErrRequiredAccountLevelResourceContainer + } + + uri := buildURI(fmt.Sprintf("/%s/addressing/address_maps", rc.URLFragment()), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []AddressMap{}, err + } + + result := ListAddressMapResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []AddressMap{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// CreateAddressMap creates a new address map under the account. +// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-create-address-map +func (api *API) CreateAddressMap(ctx context.Context, rc *ResourceContainer, params CreateAddressMapParams) (AddressMap, error) { + if rc.Level != AccountRouteLevel { + return AddressMap{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/addressing/address_maps", rc.URLFragment()) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return AddressMap{}, err + } + + result := GetAddressMapResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return AddressMap{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetAddressMap returns a specific address map. +// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-address-map-details +func (api *API) GetAddressMap(ctx context.Context, rc *ResourceContainer, id string) (AddressMap, error) { + if rc.Level != AccountRouteLevel { + return AddressMap{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s", rc.URLFragment(), id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AddressMap{}, err + } + + result := GetAddressMapResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return AddressMap{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdateAddressMap modifies properties of an address map owned by the account. 
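+//
+// A minimal usage sketch, assuming an initialized *API client (api), a
+// context.Context (ctx), a caller-supplied address map ID and the
+// account-level ResourceContainer these endpoints require:
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	enabled := false
+//	desc := "Maintenance window"
+//	updated, err := api.UpdateAddressMap(ctx, rc, cloudflare.UpdateAddressMapParams{
+//		ID:          addressMapID,
+//		Enabled:     &enabled,
+//		Description: &desc,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(updated.ModifiedAt)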
+// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-update-address-map +func (api *API) UpdateAddressMap(ctx context.Context, rc *ResourceContainer, params UpdateAddressMapParams) (AddressMap, error) { + if rc.Level != AccountRouteLevel { + return AddressMap{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s", rc.URLFragment(), params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return AddressMap{}, err + } + + result := GetAddressMapResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return AddressMap{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteAddressMap deletes a particular address map owned by the account. +// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-delete-address-map +func (api *API) DeleteAddressMap(ctx context.Context, rc *ResourceContainer, id string) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s", rc.URLFragment(), id) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + return err +} + +// CreateIPAddressToAddressMap adds an IP address from a prefix owned by the account to a particular address map. +// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-add-an-ip-to-an-address-map +func (api *API) CreateIPAddressToAddressMap(ctx context.Context, rc *ResourceContainer, params CreateIPAddressToAddressMapParams) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s/ips/%s", rc.URLFragment(), params.ID, params.IP) + _, err := api.makeRequestContext(ctx, http.MethodPut, uri, nil) + return err +} + +// DeleteIPAddressFromAddressMap removes an IP address from a particular address map. +// +// API reference: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-remove-an-ip-from-an-address-map +func (api *API) DeleteIPAddressFromAddressMap(ctx context.Context, rc *ResourceContainer, params DeleteIPAddressFromAddressMapParams) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s/ips/%s", rc.URLFragment(), params.ID, params.IP) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + return err +} + +// CreateMembershipToAddressMap adds a zone/account as a member of a particular address map. 
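+//
+// A minimal usage sketch, assuming an initialized *API client api, a context ctx and an
+// account-level *ResourceContainer rc; the address map and zone IDs are placeholders:
+//
+//	err := api.CreateMembershipToAddressMap(ctx, rc, CreateMembershipToAddressMapParams{
+//		ID: "<address map id>",
+//		Membership: AddressMapMembershipContainer{
+//			Identifier: "<zone id>",
+//			Kind:       AddressMapMembershipZone,
+//		},
+//	})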
+// +// API reference: +// - account: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-add-an-account-membership-to-an-address-map +// - zone: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-add-a-zone-membership-to-an-address-map +func (api *API) CreateMembershipToAddressMap(ctx context.Context, rc *ResourceContainer, params CreateMembershipToAddressMapParams) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + if params.Membership.Kind != AddressMapMembershipZone && params.Membership.Kind != AddressMapMembershipAccount { + return fmt.Errorf("requested membershp kind (%q) is not supported", params.Membership.Kind) + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s/%s", rc.URLFragment(), params.ID, params.Membership.URLFragment()) + _, err := api.makeRequestContext(ctx, http.MethodPut, uri, nil) + return err +} + +// DeleteMembershipFromAddressMap removes a zone/account as a member of a particular address map. +// +// API reference: +// - account: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-remove-an-account-membership-from-an-address-map +// - zone: https://developers.cloudflare.com/api/operations/ip-address-management-address-maps-remove-a-zone-membership-from-an-address-map +func (api *API) DeleteMembershipFromAddressMap(ctx context.Context, rc *ResourceContainer, params DeleteMembershipFromAddressMapParams) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + if params.Membership.Kind != AddressMapMembershipZone && params.Membership.Kind != AddressMapMembershipAccount { + return fmt.Errorf("requested membershp kind (%q) is not supported", params.Membership.Kind) + } + + uri := fmt.Sprintf("/%s/addressing/address_maps/%s/%s", rc.URLFragment(), params.ID, params.Membership.URLFragment()) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + return err +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/addressing_ip_prefix.go b/vendor/github.com/cloudflare/cloudflare-go/addressing_ip_prefix.go new file mode 100644 index 0000000..a2e686c --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/addressing_ip_prefix.go @@ -0,0 +1,149 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// IPPrefix contains information about an IP prefix. +type IPPrefix struct { + ID string `json:"id"` + CreatedAt *time.Time `json:"created_at"` + ModifiedAt *time.Time `json:"modified_at"` + CIDR string `json:"cidr"` + AccountID string `json:"account_id"` + Description string `json:"description"` + Approved string `json:"approved"` + OnDemandEnabled bool `json:"on_demand_enabled"` + OnDemandLocked bool `json:"on_demand_locked"` + Advertised bool `json:"advertised"` + AdvertisedModifiedAt *time.Time `json:"advertised_modified_at"` +} + +// AdvertisementStatus contains information about the BGP status of an IP prefix. +type AdvertisementStatus struct { + Advertised bool `json:"advertised"` + AdvertisedModifiedAt *time.Time `json:"advertised_modified_at"` +} + +// ListIPPrefixResponse contains a slice of IP prefixes. +type ListIPPrefixResponse struct { + Response + Result []IPPrefix `json:"result"` +} + +// GetIPPrefixResponse contains a specific IP prefix's API Response. 
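+//
+// It is the unmarshal target for GetPrefix below. A minimal read sketch, assuming an
+// initialized *API client api and a context ctx; the account and prefix IDs are placeholders:
+//
+//	prefix, err := api.GetPrefix(ctx, "<account id>", "<prefix id>")
+//	if err == nil {
+//		fmt.Println(prefix.CIDR, prefix.Advertised)
+//	}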
+type GetIPPrefixResponse struct { + Response + Result IPPrefix `json:"result"` +} + +// GetAdvertisementStatusResponse contains an API Response for the BGP status of the IP Prefix. +type GetAdvertisementStatusResponse struct { + Response + Result AdvertisementStatus `json:"result"` +} + +// IPPrefixUpdateRequest contains information about prefix updates. +type IPPrefixUpdateRequest struct { + Description string `json:"description"` +} + +// AdvertisementStatusUpdateRequest contains information about bgp status updates. +type AdvertisementStatusUpdateRequest struct { + Advertised bool `json:"advertised"` +} + +// ListPrefixes lists all IP prefixes for a given account +// +// API reference: https://api.cloudflare.com/#ip-address-management-prefixes-list-prefixes +func (api *API) ListPrefixes(ctx context.Context, accountID string) ([]IPPrefix, error) { + uri := fmt.Sprintf("/accounts/%s/addressing/prefixes", accountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []IPPrefix{}, err + } + + result := ListIPPrefixResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []IPPrefix{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetPrefix returns a specific IP prefix +// +// API reference: https://api.cloudflare.com/#ip-address-management-prefixes-prefix-details +func (api *API) GetPrefix(ctx context.Context, accountID, ID string) (IPPrefix, error) { + uri := fmt.Sprintf("/accounts/%s/addressing/prefixes/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return IPPrefix{}, err + } + + result := GetIPPrefixResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPPrefix{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdatePrefixDescription edits the description of the IP prefix +// +// API reference: https://api.cloudflare.com/#ip-address-management-prefixes-update-prefix-description +func (api *API) UpdatePrefixDescription(ctx context.Context, accountID, ID string, description string) (IPPrefix, error) { + uri := fmt.Sprintf("/accounts/%s/addressing/prefixes/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, IPPrefixUpdateRequest{Description: description}) + if err != nil { + return IPPrefix{}, err + } + + result := GetIPPrefixResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPPrefix{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetAdvertisementStatus returns the BGP status of the IP prefix +// +// API reference: https://api.cloudflare.com/#ip-address-management-prefixes-update-prefix-description +func (api *API) GetAdvertisementStatus(ctx context.Context, accountID, ID string) (AdvertisementStatus, error) { + uri := fmt.Sprintf("/accounts/%s/addressing/prefixes/%s/bgp/status", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AdvertisementStatus{}, err + } + + result := GetAdvertisementStatusResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return AdvertisementStatus{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdateAdvertisementStatus changes the BGP status of an IP prefix +// +// API reference: https://api.cloudflare.com/#ip-address-management-prefixes-update-prefix-description +func (api *API) UpdateAdvertisementStatus(ctx context.Context, 
accountID, ID string, advertised bool) (AdvertisementStatus, error) { + uri := fmt.Sprintf("/accounts/%s/addressing/prefixes/%s/bgp/status", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, AdvertisementStatusUpdateRequest{Advertised: advertised}) + if err != nil { + return AdvertisementStatus{}, err + } + + result := GetAdvertisementStatusResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return AdvertisementStatus{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/api_shield.go b/vendor/github.com/cloudflare/cloudflare-go/api_shield.go new file mode 100644 index 0000000..622631c --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/api_shield.go @@ -0,0 +1,71 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// AuthIdCharacteristics a single option from +// configuration?properties=auth_id_characteristics. +type AuthIdCharacteristics struct { + Type string `json:"type"` + Name string `json:"name"` +} + +// APIShield is all the available options under +// configuration?properties=auth_id_characteristics. +type APIShield struct { + AuthIdCharacteristics []AuthIdCharacteristics `json:"auth_id_characteristics"` +} + +// APIShieldResponse represents the response from the api_gateway/configuration endpoint. +type APIShieldResponse struct { + Result APIShield `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type UpdateAPIShieldParams struct { + AuthIdCharacteristics []AuthIdCharacteristics `json:"auth_id_characteristics"` +} + +// GetAPIShieldConfiguration gets a zone API shield configuration. +// +// API documentation: https://api.cloudflare.com/#api-shield-settings-retrieve-information-about-specific-configuration-properties +func (api *API) GetAPIShieldConfiguration(ctx context.Context, rc *ResourceContainer) (APIShield, ResultInfo, error) { + uri := fmt.Sprintf("/zones/%s/api_gateway/configuration?properties=auth_id_characteristics", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return APIShield{}, ResultInfo{}, err + } + var asResponse APIShieldResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return APIShield{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return asResponse.Result, asResponse.ResultInfo, nil +} + +// UpdateAPIShieldConfiguration sets a zone API shield configuration. 
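+//
+// A minimal usage sketch, assuming an initialized *API client api, a context ctx and a
+// zone-level *ResourceContainer rc (for example from ZoneIdentifier); the header-based
+// characteristic shown is one common choice:
+//
+//	_, err := api.UpdateAPIShieldConfiguration(ctx, rc, UpdateAPIShieldParams{
+//		AuthIdCharacteristics: []AuthIdCharacteristics{
+//			{Type: "header", Name: "authorization"},
+//		},
+//	})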
+// +// API documentation: https://api.cloudflare.com/#api-shield-settings-set-configuration-properties +func (api *API) UpdateAPIShieldConfiguration(ctx context.Context, rc *ResourceContainer, params UpdateAPIShieldParams) (Response, error) { + uri := fmt.Sprintf("/zones/%s/api_gateway/configuration", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return Response{}, err + } + var asResponse Response + err = json.Unmarshal(res, &asResponse) + if err != nil { + return Response{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return asResponse, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/api_shield_api_discovery.go b/vendor/github.com/cloudflare/cloudflare-go/api_shield_api_discovery.go new file mode 100644 index 0000000..69b3ad2 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/api_shield_api_discovery.go @@ -0,0 +1,190 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// APIShieldDiscoveryOrigin is an enumeration on what discovery engine an operation was discovered by. +type APIShieldDiscoveryOrigin string + +const ( + + // APIShieldDiscoveryOriginML discovered operations that were sourced using ML API Discovery. + APIShieldDiscoveryOriginML APIShieldDiscoveryOrigin = "ML" + // APIShieldDiscoveryOriginSessionIdentifier discovered operations that were sourced using Session Identifier + // API Discovery. + APIShieldDiscoveryOriginSessionIdentifier APIShieldDiscoveryOrigin = "SessionIdentifier" +) + +// APIShieldDiscoveryState is an enumeration on states a discovery operation can be in. +type APIShieldDiscoveryState string + +const ( + // APIShieldDiscoveryStateReview discovered operations that are not saved into API Shield Endpoint Management. + APIShieldDiscoveryStateReview APIShieldDiscoveryState = "review" + // APIShieldDiscoveryStateSaved discovered operations that are already saved into API Shield Endpoint Management. + APIShieldDiscoveryStateSaved APIShieldDiscoveryState = "saved" + // APIShieldDiscoveryStateIgnored discovered operations that have been marked as ignored. + APIShieldDiscoveryStateIgnored APIShieldDiscoveryState = "ignored" +) + +// APIShieldDiscoveryOperation is an operation that was discovered by API Discovery. +type APIShieldDiscoveryOperation struct { + // ID represents the ID of the operation, formatted as UUID + ID string `json:"id"` + // Origin represents the API discovery engine(s) that discovered this operation + Origin []APIShieldDiscoveryOrigin `json:"origin"` + // State represents the state of operation in API Discovery + State APIShieldDiscoveryState `json:"state"` + // LastUpdated timestamp of when this operation was last updated + LastUpdated *time.Time `json:"last_updated"` + // Features are additional data about the operation + Features map[string]any `json:"features,omitempty"` + + Method string `json:"method"` + Host string `json:"host"` + Endpoint string `json:"endpoint"` +} + +// ListAPIShieldDiscoveryOperationsParams represents the parameters to pass when retrieving discovered operations. +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-api-discovery-retrieve-discovered-operations-on-a-zone +type ListAPIShieldDiscoveryOperationsParams struct { + // Direction to order results. + Direction string `url:"direction,omitempty"` + // OrderBy when requesting a feature, the feature keys are available for ordering as well, e.g., thresholds.suggested_threshold. 
+	OrderBy string `url:"order,omitempty"`
+	// Hosts filters results to only include the specified hosts.
+	Hosts []string `url:"host,omitempty"`
+	// Methods filters results to only include the specified methods.
+	Methods []string `url:"method,omitempty"`
+	// Endpoint filters results to only include endpoints containing this pattern.
+	Endpoint string `url:"endpoint,omitempty"`
+	// Diff when true, only return API Discovery results that are not saved into API Shield Endpoint Management
+	Diff bool `url:"diff,omitempty"`
+	// Origin filters results to only include discovery results sourced from a particular discovery engine
+	// See APIShieldDiscoveryOrigin for valid values.
+	Origin APIShieldDiscoveryOrigin `url:"origin,omitempty"`
+	// State filters results to only include discovery results in a particular state
+	// See APIShieldDiscoveryState for valid values.
+	State APIShieldDiscoveryState `url:"state,omitempty"`
+
+	// Pagination options to apply to the request.
+	PaginationOptions
+}
+
+// UpdateAPIShieldDiscoveryOperationParams represents the parameters to pass to patch a discovery operation
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-api-patch-discovered-operation
+type UpdateAPIShieldDiscoveryOperationParams struct {
+	// OperationID is the ID, formatted as UUID, of the operation to be updated
+	OperationID string `json:"-" url:"-"`
+	State APIShieldDiscoveryState `json:"state" url:"-"`
+}
+
+// UpdateAPIShieldDiscoveryOperationsParams maps discovery operation IDs to UpdateAPIShieldDiscoveryOperation structs
+//
+// Example:
+//
+//	UpdateAPIShieldDiscoveryOperationsParams{
+//		"99522293-a505-45e5-bbad-bbc339f5dc40": UpdateAPIShieldDiscoveryOperation{ State: "review" },
+//	}
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-api-patch-discovered-operations
+type UpdateAPIShieldDiscoveryOperationsParams map[string]UpdateAPIShieldDiscoveryOperation
+
+// UpdateAPIShieldDiscoveryOperation represents the state to set on a discovery operation.
+type UpdateAPIShieldDiscoveryOperation struct {
+	// State is the state to set on the operation
+	State APIShieldDiscoveryState `json:"state" url:"-"`
+}
+
+// APIShieldListDiscoveryOperationsResponse represents the response from the api_gateway/discovery/operations endpoint.
+type APIShieldListDiscoveryOperationsResponse struct {
+	Result []APIShieldDiscoveryOperation `json:"result"`
+	ResultInfo `json:"result_info"`
+	Response
+}
+
+// APIShieldPatchDiscoveryOperationResponse represents the response from the PATCH api_gateway/discovery/operations/{id} endpoint.
+type APIShieldPatchDiscoveryOperationResponse struct {
+	Result UpdateAPIShieldDiscoveryOperation `json:"result"`
+	Response
+}
+
+// APIShieldPatchDiscoveryOperationsResponse represents the response from the PATCH api_gateway/discovery/operations endpoint.
+type APIShieldPatchDiscoveryOperationsResponse struct {
+	Result UpdateAPIShieldDiscoveryOperationsParams `json:"result"`
+	Response
+}
+
+// ListAPIShieldDiscoveryOperations retrieves the most up-to-date view of discovered operations.
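+//
+// A minimal usage sketch that lists ML-discovered operations still in review, assuming an
+// initialized *API client api, a context ctx and a zone-level *ResourceContainer rc:
+//
+//	ops, _, err := api.ListAPIShieldDiscoveryOperations(ctx, rc, ListAPIShieldDiscoveryOperationsParams{
+//		State:  APIShieldDiscoveryStateReview,
+//		Origin: APIShieldDiscoveryOriginML,
+//	})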
+// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-api-discovery-retrieve-discovered-operations-on-a-zone +func (api *API) ListAPIShieldDiscoveryOperations(ctx context.Context, rc *ResourceContainer, params ListAPIShieldDiscoveryOperationsParams) ([]APIShieldDiscoveryOperation, ResultInfo, error) { + uri := buildURI(fmt.Sprintf("/zones/%s/api_gateway/discovery/operations", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, ResultInfo{}, err + } + + var asResponse APIShieldListDiscoveryOperationsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return asResponse.Result, asResponse.ResultInfo, nil +} + +// UpdateAPIShieldDiscoveryOperation updates certain fields on a discovered operation. +// +// API Documentation: https://developers.cloudflare.com/api/operations/api-shield-api-patch-discovered-operation +func (api *API) UpdateAPIShieldDiscoveryOperation(ctx context.Context, rc *ResourceContainer, params UpdateAPIShieldDiscoveryOperationParams) (*UpdateAPIShieldDiscoveryOperation, error) { + if params.OperationID == "" { + return nil, fmt.Errorf("operation ID must be provided") + } + + uri := fmt.Sprintf("/zones/%s/api_gateway/discovery/operations/%s", rc.Identifier, params.OperationID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return nil, err + } + + // Result should be the updated schema that was patched + var asResponse APIShieldPatchDiscoveryOperationResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// UpdateAPIShieldDiscoveryOperations bulk updates certain fields on multiple discovered operations +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-api-patch-discovered-operations +func (api *API) UpdateAPIShieldDiscoveryOperations(ctx context.Context, rc *ResourceContainer, params UpdateAPIShieldDiscoveryOperationsParams) (*UpdateAPIShieldDiscoveryOperationsParams, error) { + uri := fmt.Sprintf("/zones/%s/api_gateway/discovery/operations", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return nil, err + } + + // Result should be the updated schema that was patched + var asResponse APIShieldPatchDiscoveryOperationsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/api_shield_operations.go b/vendor/github.com/cloudflare/cloudflare-go/api_shield_operations.go new file mode 100644 index 0000000..1d89a17 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/api_shield_operations.go @@ -0,0 +1,185 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// APIShieldOperation represents an operation stored in API Shield Endpoint Management. +type APIShieldOperation struct { + APIShieldBasicOperation + ID string `json:"operation_id"` + LastUpdated *time.Time `json:"last_updated"` + Features map[string]any `json:"features,omitempty"` +} + +// GetAPIShieldOperationParams represents the parameters to pass when retrieving an operation. 
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-retrieve-information-about-an-operation
+type GetAPIShieldOperationParams struct {
+	// The Operation ID to retrieve
+	OperationID string `url:"-"`
+	// Features represents a set of features to return in `features` object when
+	// making read requests against an Operation or listing operations.
+	Features []string `url:"feature,omitempty"`
+}
+
+// CreateAPIShieldOperationsParams represents the parameters to pass when adding one or more operations.
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-add-operations-to-a-zone
+type CreateAPIShieldOperationsParams struct {
+	// Operations are a slice of operations to be created in API Shield Endpoint Management
+	Operations []APIShieldBasicOperation `url:"-"`
+}
+
+// APIShieldBasicOperation should be used when creating an operation in API Shield Endpoint Management.
+type APIShieldBasicOperation struct {
+	Method   string `json:"method"`
+	Host     string `json:"host"`
+	Endpoint string `json:"endpoint"`
+}
+
+// DeleteAPIShieldOperationParams represents the parameters to pass to delete an operation.
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-delete-an-operation
+type DeleteAPIShieldOperationParams struct {
+	// OperationID is the operation to be deleted
+	OperationID string `url:"-"`
+}
+
+// ListAPIShieldOperationsParams represents the parameters to pass when retrieving operations
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-retrieve-information-about-all-operations-on-a-zone
+type ListAPIShieldOperationsParams struct {
+	// Features represents a set of features to return in `features` object when
+	// making read requests against an Operation or listing operations.
+	Features []string `url:"feature,omitempty"`
+	// Direction to order results.
+	Direction string `url:"direction,omitempty"`
+	// OrderBy when requesting a feature, the feature keys are available for ordering as well, e.g., thresholds.suggested_threshold.
+	OrderBy string `url:"order,omitempty"`
+	// Filters to only return operations that match filtering criteria, see APIShieldListOperationsFilters
+	APIShieldListOperationsFilters
+	// Pagination options to apply to the request.
+	PaginationOptions
+}
+
+// APIShieldListOperationsFilters represents the filtering query parameters to set when retrieving operations
+//
+// API documentation: https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-retrieve-information-about-all-operations-on-a-zone
+type APIShieldListOperationsFilters struct {
+	// Hosts filters results to only include the specified hosts.
+	Hosts []string `url:"host,omitempty"`
+	// Methods filters results to only include the specified methods.
+	Methods []string `url:"method,omitempty"`
+	// Endpoint filters results to only include endpoints containing this pattern.
+	Endpoint string `url:"endpoint,omitempty"`
+}
+
+// APIShieldGetOperationResponse represents the response from the api_gateway/operations/{id} endpoint.
+type APIShieldGetOperationResponse struct {
+	Result APIShieldOperation `json:"result"`
+	Response
+}
+
+// APIShieldGetOperationsResponse represents the response from the api_gateway/operations endpoint.
+type APIShieldGetOperationsResponse struct { + Result []APIShieldOperation `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// APIShieldDeleteOperationResponse represents the response from the api_gateway/operations/{id} endpoint (DELETE). +type APIShieldDeleteOperationResponse struct { + Result interface{} `json:"result"` + Response +} + +// GetAPIShieldOperation returns information about an operation +// +// API documentation https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-retrieve-information-about-an-operation +func (api *API) GetAPIShieldOperation(ctx context.Context, rc *ResourceContainer, params GetAPIShieldOperationParams) (*APIShieldOperation, error) { + path := fmt.Sprintf("/zones/%s/api_gateway/operations/%s", rc.Identifier, params.OperationID) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var asResponse APIShieldGetOperationResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// ListAPIShieldOperations retrieve information about all operations on a zone +// +// API documentation https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-retrieve-information-about-all-operations-on-a-zone +func (api *API) ListAPIShieldOperations(ctx context.Context, rc *ResourceContainer, params ListAPIShieldOperationsParams) ([]APIShieldOperation, ResultInfo, error) { + path := fmt.Sprintf("/zones/%s/api_gateway/operations", rc.Identifier) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, ResultInfo{}, err + } + + var asResponse APIShieldGetOperationsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return asResponse.Result, asResponse.ResultInfo, nil +} + +// CreateAPIShieldOperations add one or more operations to a zone. 
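+//
+// A minimal usage sketch, assuming an initialized *API client api, a context ctx and a
+// zone-level *ResourceContainer rc; the host and endpoint are placeholders:
+//
+//	ops, err := api.CreateAPIShieldOperations(ctx, rc, CreateAPIShieldOperationsParams{
+//		Operations: []APIShieldBasicOperation{
+//			{Method: "GET", Host: "api.example.com", Endpoint: "/widgets/{id}"},
+//		},
+//	})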
+// +// API documentation https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-add-operations-to-a-zone +func (api *API) CreateAPIShieldOperations(ctx context.Context, rc *ResourceContainer, params CreateAPIShieldOperationsParams) ([]APIShieldOperation, error) { + uri := fmt.Sprintf("/zones/%s/api_gateway/operations", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Operations) + if err != nil { + return nil, err + } + + // Result should be all the operations added to the zone, similar to doing GetAPIShieldOperations + var asResponse APIShieldGetOperationsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return asResponse.Result, nil +} + +// DeleteAPIShieldOperation deletes a single operation +// +// API documentation https://developers.cloudflare.com/api/operations/api-shield-endpoint-management-delete-an-operation +func (api *API) DeleteAPIShieldOperation(ctx context.Context, rc *ResourceContainer, params DeleteAPIShieldOperationParams) error { + uri := fmt.Sprintf("/zones/%s/api_gateway/operations/%s", rc.Identifier, params.OperationID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var asResponse APIShieldDeleteOperationResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/api_shield_schemas.go b/vendor/github.com/cloudflare/cloudflare-go/api_shield_schemas.go new file mode 100644 index 0000000..804181d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/api_shield_schemas.go @@ -0,0 +1,505 @@ +package cloudflare + +import ( + "bytes" + "context" + "fmt" + "io" + "mime/multipart" + "net/http" + "strconv" + "time" + + "github.com/goccy/go-json" +) + +// APIShieldSchema represents a schema stored in API Shield Schema Validation 2.0. +type APIShieldSchema struct { + // ID represents the ID of the schema + ID string `json:"schema_id"` + // Name represents the name of the schema + Name string `json:"name"` + // Kind of the schema + Kind string `json:"kind"` + // Source is the contents of the schema + Source string `json:"source,omitempty"` + // CreatedAt is the time the schema was created + CreatedAt *time.Time `json:"created_at,omitempty"` + // ValidationEnabled controls if schema is used for validation + ValidationEnabled bool `json:"validation_enabled,omitempty"` +} + +// CreateAPIShieldSchemaParams represents the parameters to pass when creating a schema in Schema Validation 2.0. +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-post-schema +type CreateAPIShieldSchemaParams struct { + // Source is a io.Reader containing the contents of the schema + Source io.Reader + // Name represents the name of the schema. + Name string + // Kind of the schema. This is always set to openapi_v3. + Kind string + // ValidationEnabled controls if schema is used for validation + ValidationEnabled *bool +} + +// GetAPIShieldSchemaParams represents the parameters to pass when retrieving a schema with a given schema ID. 
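+//
+// A minimal usage sketch, assuming an initialized *API client api, a context ctx and a
+// zone-level *ResourceContainer rc; the schema ID is a placeholder and the source is omitted
+// to keep the response small:
+//
+//	omit := true
+//	schema, err := api.GetAPIShieldSchema(ctx, rc, GetAPIShieldSchemaParams{
+//		SchemaID:   "<schema id>",
+//		OmitSource: &omit,
+//	})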
+// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-information-about-specific-schema +type GetAPIShieldSchemaParams struct { + // SchemaID is the ID of the schema to retrieve + SchemaID string `url:"-"` + + // OmitSource specifies whether the contents of the schema should be returned in the "Source" field. + OmitSource *bool `url:"omit_source,omitempty"` +} + +// ListAPIShieldSchemasParams represents the parameters to pass when retrieving all schemas. +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-information-about-all-schemas +type ListAPIShieldSchemasParams struct { + // OmitSource specifies whether the contents of the schema should be returned in the "Source" field. + OmitSource *bool `url:"omit_source,omitempty"` + + // ValidationEnabled specifies whether to return only schemas that have validation enabled. + ValidationEnabled *bool `url:"validation_enabled,omitempty"` + + // PaginationOptions to apply to the request. + PaginationOptions +} + +// DeleteAPIShieldSchemaParams represents the parameters to pass to delete a schema. +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-delete-a-schema +type DeleteAPIShieldSchemaParams struct { + // SchemaID is the schema to be deleted + SchemaID string `url:"-"` +} + +// UpdateAPIShieldSchemaParams represents the parameters to pass to patch certain fields on an existing schema +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-enable-validation-for-a-schema +type UpdateAPIShieldSchemaParams struct { + // SchemaID is the schema to be patched + SchemaID string `json:"-" url:"-"` + + // ValidationEnabled controls if schema is used for validation + ValidationEnabled *bool `json:"validation_enabled" url:"-"` +} + +// APIShieldGetSchemaResponse represents the response from the GET api_gateway/user_schemas/{id} endpoint. +type APIShieldGetSchemaResponse struct { + Result APIShieldSchema `json:"result"` + Response +} + +// APIShieldListSchemasResponse represents the response from the GET api_gateway/user_schemas endpoint. +type APIShieldListSchemasResponse struct { + Result []APIShieldSchema `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// APIShieldCreateSchemaResponse represents the response from the POST api_gateway/user_schemas endpoint. +type APIShieldCreateSchemaResponse struct { + Result APIShieldCreateSchemaResult `json:"result"` + Response +} + +// APIShieldDeleteSchemaResponse represents the response from the DELETE api_gateway/user_schemas/{id} endpoint. +type APIShieldDeleteSchemaResponse struct { + Result interface{} `json:"result"` + Response +} + +// APIShieldPatchSchemaResponse represents the response from the PATCH api_gateway/user_schemas/{id} endpoint. +type APIShieldPatchSchemaResponse struct { + Result APIShieldSchema `json:"result"` + Response +} + +// APIShieldCreateSchemaResult represents the successful result of creating a schema in Schema Validation 2.0. +type APIShieldCreateSchemaResult struct { + // APIShieldSchema is the schema that was created + Schema APIShieldSchema `json:"schema"` + // APIShieldCreateSchemaEvents are non-critical event logs that occurred during processing. + Events APIShieldCreateSchemaEvents `json:"upload_details"` +} + +// APIShieldCreateSchemaEvents are event logs that occurred during processing. +// +// The logs are separated into levels of severity. 
+type APIShieldCreateSchemaEvents struct { + Critical *APIShieldCreateSchemaEventWithLocation `json:"critical,omitempty"` + Errors []APIShieldCreateSchemaEventWithLocations `json:"errors,omitempty"` + Warnings []APIShieldCreateSchemaEventWithLocations `json:"warnings,omitempty"` +} + +// APIShieldCreateSchemaEvent is an event log that occurred during processing. +type APIShieldCreateSchemaEvent struct { + // Code identifies the event that occurred + Code uint `json:"code"` + // Message describes the event that occurred + Message string `json:"message"` +} + +// APIShieldCreateSchemaEventWithLocation is an event log that occurred during processing, with the location +// in the schema where the event occurred. +type APIShieldCreateSchemaEventWithLocation struct { + APIShieldCreateSchemaEvent + + // Location is where the event occurred + // See https://goessner.net/articles/JsonPath/ for JSONPath specification. + Location string `json:"location,omitempty"` +} + +// APIShieldCreateSchemaEventWithLocations is an event log that occurred during processing, with the location(s) +// in the schema where the event occurred. +type APIShieldCreateSchemaEventWithLocations struct { + APIShieldCreateSchemaEvent + + // Locations lists JSONPath locations where the event occurred + // See https://goessner.net/articles/JsonPath/ for JSONPath specification + Locations []string `json:"locations"` +} + +func (cse APIShieldCreateSchemaEventWithLocations) String() string { + var s string + s += cse.Message + + if len(cse.Locations) == 0 || (len(cse.Locations) == 1 && cse.Locations[0] == "") { + // append nothing + } else if len(cse.Locations) == 1 { + s += fmt.Sprintf(" (%s)", cse.Locations[0]) + } else { + s += " (multiple locations)" + } + + return s +} + +// GetAPIShieldSchema retrieves information about a specific schema on a zone +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-information-about-specific-schema +func (api *API) GetAPIShieldSchema(ctx context.Context, rc *ResourceContainer, params GetAPIShieldSchemaParams) (*APIShieldSchema, error) { + if params.SchemaID == "" { + return nil, fmt.Errorf("schema ID must be provided") + } + + path := fmt.Sprintf("/zones/%s/api_gateway/user_schemas/%s", rc.Identifier, params.SchemaID) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var asResponse APIShieldGetSchemaResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// ListAPIShieldSchemas retrieves all schemas for a zone +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-information-about-all-schemas +func (api *API) ListAPIShieldSchemas(ctx context.Context, rc *ResourceContainer, params ListAPIShieldSchemasParams) ([]APIShieldSchema, ResultInfo, error) { + path := fmt.Sprintf("/zones/%s/api_gateway/user_schemas", rc.Identifier) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, ResultInfo{}, err + } + + var asResponse APIShieldListSchemasResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return asResponse.Result, asResponse.ResultInfo, nil +} + +// CreateAPIShieldSchema uploads a schema to a 
zone +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-post-schema +func (api *API) CreateAPIShieldSchema(ctx context.Context, rc *ResourceContainer, params CreateAPIShieldSchemaParams) (*APIShieldCreateSchemaResult, error) { + uri := fmt.Sprintf("/zones/%s/api_gateway/user_schemas", rc.Identifier) + + if params.Name == "" { + return nil, fmt.Errorf("name must not be empty") + } + + if params.Source == nil { + return nil, fmt.Errorf("source must not be nil") + } + + // Prepare the form to be submitted + var b bytes.Buffer + w := multipart.NewWriter(&b) + // write fields + if err := w.WriteField("name", params.Name); err != nil { + return nil, fmt.Errorf("error during multi-part form construction: %w", err) + } + if err := w.WriteField("kind", params.Kind); err != nil { + return nil, fmt.Errorf("error during multi-part form construction: %w", err) + } + + if params.ValidationEnabled != nil { + if err := w.WriteField("validation_enabled", strconv.FormatBool(*params.ValidationEnabled)); err != nil { + return nil, fmt.Errorf("error during multi-part form construction: %w", err) + } + } + + // write schema contents + part, err := w.CreateFormFile("file", params.Name) + if err != nil { + return nil, fmt.Errorf("error during multi-part form construction: %w", err) + } + if _, err := io.Copy(part, params.Source); err != nil { + return nil, fmt.Errorf("error during multi-part form construction: %w", err) + } + if err := w.Close(); err != nil { + return nil, fmt.Errorf("error during multi-part form construction: %w", err) + } + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, &b, http.Header{ + "Content-Type": []string{w.FormDataContentType()}, + }) + if err != nil { + return nil, err + } + + var asResponse APIShieldCreateSchemaResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// DeleteAPIShieldSchema deletes a single schema +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-delete-a-schema +func (api *API) DeleteAPIShieldSchema(ctx context.Context, rc *ResourceContainer, params DeleteAPIShieldSchemaParams) error { + if params.SchemaID == "" { + return fmt.Errorf("schema ID must be provided") + } + + uri := fmt.Sprintf("/zones/%s/api_gateway/user_schemas/%s", rc.Identifier, params.SchemaID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var asResponse APIShieldDeleteSchemaResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// UpdateAPIShieldSchema updates certain fields on an existing schema. 
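+//
+// A minimal sketch that enables validation for an uploaded schema, assuming an initialized
+// *API client api, a context ctx and a zone-level *ResourceContainer rc; the schema ID is a
+// placeholder:
+//
+//	enabled := true
+//	_, err := api.UpdateAPIShieldSchema(ctx, rc, UpdateAPIShieldSchemaParams{
+//		SchemaID:          "<schema id>",
+//		ValidationEnabled: &enabled,
+//	})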
+// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-enable-validation-for-a-schema +func (api *API) UpdateAPIShieldSchema(ctx context.Context, rc *ResourceContainer, params UpdateAPIShieldSchemaParams) (*APIShieldSchema, error) { + if params.SchemaID == "" { + return nil, fmt.Errorf("schema ID must be provided") + } + + uri := fmt.Sprintf("/zones/%s/api_gateway/user_schemas/%s", rc.Identifier, params.SchemaID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return nil, err + } + + // Result should be the updated schema that was patched + var asResponse APIShieldPatchSchemaResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// Schema Validation Settings + +// APIShieldSchemaValidationSettings represents zone level schema validation settings for +// API Shield Schema Validation 2.0. +type APIShieldSchemaValidationSettings struct { + // DefaultMitigationAction is the mitigation to apply when there is no operation-level + // mitigation action defined + DefaultMitigationAction string `json:"validation_default_mitigation_action" url:"-"` + // OverrideMitigationAction when set, will apply to all requests regardless of + // zone-level/operation-level setting + OverrideMitigationAction *string `json:"validation_override_mitigation_action" url:"-"` +} + +// UpdateAPIShieldSchemaValidationSettingsParams represents the parameters to pass to update certain fields +// on schema validation settings on the zone +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-patch-zone-level-settings +type UpdateAPIShieldSchemaValidationSettingsParams struct { + // DefaultMitigationAction is the mitigation to apply when there is no operation-level + // mitigation action defined + // + // passing a `nil` value will have no effect on this setting + DefaultMitigationAction *string `json:"validation_default_mitigation_action" url:"-"` + + // OverrideMitigationAction when set, will apply to all requests regardless of + // zone-level/operation-level setting + // + // passing a `nil` value will have no effect on this setting + OverrideMitigationAction *string `json:"validation_override_mitigation_action" url:"-"` +} + +// APIShieldSchemaValidationSettingsResponse represents the response from the GET api_gateway/settings/schema_validation endpoint. 
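+//
+// It is the unmarshal target for the settings calls below. A minimal sketch that sets the
+// zone-wide default mitigation action, assuming an initialized *API client api, a context ctx,
+// a zone-level *ResourceContainer rc and "log" as an accepted action value:
+//
+//	action := "log"
+//	_, err := api.UpdateAPIShieldSchemaValidationSettings(ctx, rc, UpdateAPIShieldSchemaValidationSettingsParams{
+//		DefaultMitigationAction: &action,
+//	})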
+type APIShieldSchemaValidationSettingsResponse struct { + Result APIShieldSchemaValidationSettings `json:"result"` + Response +} + +// GetAPIShieldSchemaValidationSettings retrieves zone level schema validation settings +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-zone-level-settings +func (api *API) GetAPIShieldSchemaValidationSettings(ctx context.Context, rc *ResourceContainer) (*APIShieldSchemaValidationSettings, error) { + path := fmt.Sprintf("/zones/%s/api_gateway/settings/schema_validation", rc.Identifier) + + uri := buildURI(path, nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var asResponse APIShieldSchemaValidationSettingsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// UpdateAPIShieldSchemaValidationSettings updates certain fields on zone level schema validation settings +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-patch-zone-level-settings +func (api *API) UpdateAPIShieldSchemaValidationSettings(ctx context.Context, rc *ResourceContainer, params UpdateAPIShieldSchemaValidationSettingsParams) (*APIShieldSchemaValidationSettings, error) { + path := fmt.Sprintf("/zones/%s/api_gateway/settings/schema_validation", rc.Identifier) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return nil, err + } + + var asResponse APIShieldSchemaValidationSettingsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// APIShieldOperationSchemaValidationSettings represents operation level schema validation settings for +// API Shield Schema Validation 2.0. +type APIShieldOperationSchemaValidationSettings struct { + // MitigationAction is the mitigation to apply to the operation + MitigationAction *string `json:"mitigation_action" url:"-"` +} + +// GetAPIShieldOperationSchemaValidationSettingsParams represents the parameters to pass to retrieve +// the schema validation settings set on the operation. +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-operation-level-settings +type GetAPIShieldOperationSchemaValidationSettingsParams struct { + // The Operation ID to apply the mitigation action to + OperationID string `url:"-"` +} + +// UpdateAPIShieldOperationSchemaValidationSettings maps operation IDs to APIShieldOperationSchemaValidationSettings +// +// # This can be used to bulk update operations in one call +// +// Example: +// +// UpdateAPIShieldOperationSchemaValidationSettings{ +// "99522293-a505-45e5-bbad-bbc339f5dc40": APIShieldOperationSchemaValidationSettings{ MitigationAction: nil }, +// } +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-update-multiple-operation-level-settings +type UpdateAPIShieldOperationSchemaValidationSettings map[string]APIShieldOperationSchemaValidationSettings + +// APIShieldOperationSchemaValidationSettingsResponse represents the response from the GET api_gateway/operation/{operationID}/schema_validation endpoint. 
+type APIShieldOperationSchemaValidationSettingsResponse struct { + Result APIShieldOperationSchemaValidationSettings `json:"result"` + Response +} + +// UpdateAPIShieldOperationSchemaValidationSettingsResponse represents the response from the PATCH api_gateway/operations/schema_validation endpoint. +type UpdateAPIShieldOperationSchemaValidationSettingsResponse struct { + Result UpdateAPIShieldOperationSchemaValidationSettings `json:"result"` + Response +} + +// GetAPIShieldOperationSchemaValidationSettings retrieves operation level schema validation settings +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-retrieve-operation-level-settings +func (api *API) GetAPIShieldOperationSchemaValidationSettings(ctx context.Context, rc *ResourceContainer, params GetAPIShieldOperationSchemaValidationSettingsParams) (*APIShieldOperationSchemaValidationSettings, error) { + if params.OperationID == "" { + return nil, fmt.Errorf("operation ID must be provided") + } + + path := fmt.Sprintf("/zones/%s/api_gateway/operations/%s/schema_validation", rc.Identifier, params.OperationID) + + uri := buildURI(path, nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, params) + if err != nil { + return nil, err + } + + var asResponse APIShieldOperationSchemaValidationSettingsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} + +// UpdateAPIShieldOperationSchemaValidationSettings update multiple operation level schema validation settings +// +// API documentation: https://developers.cloudflare.com/api/operations/api-shield-schema-validation-update-multiple-operation-level-settings +func (api *API) UpdateAPIShieldOperationSchemaValidationSettings(ctx context.Context, rc *ResourceContainer, params UpdateAPIShieldOperationSchemaValidationSettings) (*UpdateAPIShieldOperationSchemaValidationSettings, error) { + path := fmt.Sprintf("/zones/%s/api_gateway/operations/schema_validation", rc.Identifier) + + uri := buildURI(path, nil) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return nil, err + } + + var asResponse UpdateAPIShieldOperationSchemaValidationSettingsResponse + err = json.Unmarshal(res, &asResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &asResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/api_token.go b/vendor/github.com/cloudflare/cloudflare-go/api_token.go new file mode 100644 index 0000000..9255477 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/api_token.go @@ -0,0 +1,238 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// APIToken is the full API token. +type APIToken struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Status string `json:"status,omitempty"` + IssuedOn *time.Time `json:"issued_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + NotBefore *time.Time `json:"not_before,omitempty"` + ExpiresOn *time.Time `json:"expires_on,omitempty"` + Policies []APITokenPolicies `json:"policies,omitempty"` + Condition *APITokenCondition `json:"condition,omitempty"` + Value string `json:"value,omitempty"` +} + +// APITokenPermissionGroups is the permission groups associated with API tokens. 
+type APITokenPermissionGroups struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Scopes []string `json:"scopes,omitempty"` +} + +// APITokenPolicies are policies attached to an API token. +type APITokenPolicies struct { + ID string `json:"id,omitempty"` + Effect string `json:"effect"` + Resources map[string]interface{} `json:"resources"` + PermissionGroups []APITokenPermissionGroups `json:"permission_groups"` +} + +// APITokenRequestIPCondition is the struct for adding an IP restriction to an +// API token. +type APITokenRequestIPCondition struct { + In []string `json:"in,omitempty"` + NotIn []string `json:"not_in,omitempty"` +} + +// APITokenCondition is the outer structure for request conditions (currently +// only IPs). +type APITokenCondition struct { + RequestIP *APITokenRequestIPCondition `json:"request.ip,omitempty"` +} + +// APITokenResponse is the API response for a single API token. +type APITokenResponse struct { + Response + Result APIToken `json:"result"` +} + +// APITokenListResponse is the API response for multiple API tokens. +type APITokenListResponse struct { + Response + Result []APIToken `json:"result"` +} + +// APITokenRollResponse is the API response when rolling the token. +type APITokenRollResponse struct { + Response + Result string `json:"result"` +} + +// APITokenVerifyResponse is the API response for verifying a token. +type APITokenVerifyResponse struct { + Response + Result APITokenVerifyBody `json:"result"` +} + +// APITokenPermissionGroupsResponse is the API response for the available +// permission groups. +type APITokenPermissionGroupsResponse struct { + Response + Result []APITokenPermissionGroups `json:"result"` +} + +// APITokenVerifyBody is the API body for verifying a token. +type APITokenVerifyBody struct { + ID string `json:"id"` + Status string `json:"status"` + NotBefore time.Time `json:"not_before"` + ExpiresOn time.Time `json:"expires_on"` +} + +// GetAPIToken returns a single API token based on the ID. +// +// API reference: https://api.cloudflare.com/#user-api-tokens-token-details +func (api *API) GetAPIToken(ctx context.Context, tokenID string) (APIToken, error) { + uri := fmt.Sprintf("/user/tokens/%s", tokenID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return APIToken{}, err + } + + var apiTokenResponse APITokenResponse + err = json.Unmarshal(res, &apiTokenResponse) + if err != nil { + return APIToken{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return apiTokenResponse.Result, nil +} + +// APITokens returns all available API tokens. +// +// API reference: https://api.cloudflare.com/#user-api-tokens-list-tokens +func (api *API) APITokens(ctx context.Context) ([]APIToken, error) { + res, err := api.makeRequestContext(ctx, http.MethodGet, "/user/tokens", nil) + if err != nil { + return []APIToken{}, err + } + + var apiTokenListResponse APITokenListResponse + err = json.Unmarshal(res, &apiTokenListResponse) + if err != nil { + return []APIToken{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return apiTokenListResponse.Result, nil +} + +// CreateAPIToken creates a new token. Returns the API token that has been +// generated. +// +// The token value itself is only shown once (post create) and will present as +// `Value` from this method. If you fail to capture it at this point, you will +// need to roll the token in order to get a new value. 
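+//
+// A minimal sketch that creates a zone-scoped token and captures the secret immediately,
+// assuming an initialized *API client api and a context ctx; the permission group and zone
+// IDs are placeholders:
+//
+//	token, err := api.CreateAPIToken(ctx, APIToken{
+//		Name: "dns-edit",
+//		Policies: []APITokenPolicies{{
+//			Effect: "allow",
+//			Resources: map[string]interface{}{
+//				"com.cloudflare.api.account.zone.<zone id>": "*",
+//			},
+//			PermissionGroups: []APITokenPermissionGroups{{ID: "<permission group id>"}},
+//		}},
+//	})
+//	if err == nil {
+//		secret := token.Value // only available at creation time
+//		_ = secret
+//	}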
+// +// API reference: https://api.cloudflare.com/#user-api-tokens-create-token +func (api *API) CreateAPIToken(ctx context.Context, token APIToken) (APIToken, error) { + res, err := api.makeRequestContext(ctx, http.MethodPost, "/user/tokens", token) + if err != nil { + return APIToken{}, err + } + + var createTokenAPIResponse APITokenResponse + err = json.Unmarshal(res, &createTokenAPIResponse) + if err != nil { + return APIToken{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return createTokenAPIResponse.Result, nil +} + +// UpdateAPIToken updates an existing API token. +// +// API reference: https://api.cloudflare.com/#user-api-tokens-update-token +func (api *API) UpdateAPIToken(ctx context.Context, tokenID string, token APIToken) (APIToken, error) { + res, err := api.makeRequestContext(ctx, http.MethodPut, "/user/tokens/"+tokenID, token) + if err != nil { + return APIToken{}, err + } + + var updatedTokenResponse APITokenResponse + err = json.Unmarshal(res, &updatedTokenResponse) + if err != nil { + return APIToken{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return updatedTokenResponse.Result, nil +} + +// RollAPIToken rolls the credential associated with the token. +// +// API reference: https://api.cloudflare.com/#user-api-tokens-roll-token +func (api *API) RollAPIToken(ctx context.Context, tokenID string) (string, error) { + uri := fmt.Sprintf("/user/tokens/%s/value", tokenID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, nil) + if err != nil { + return "", err + } + + var apiTokenRollResponse APITokenRollResponse + err = json.Unmarshal(res, &apiTokenRollResponse) + if err != nil { + return "", fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return apiTokenRollResponse.Result, nil +} + +// VerifyAPIToken tests the validity of the token. +// +// API reference: https://api.cloudflare.com/#user-api-tokens-verify-token +func (api *API) VerifyAPIToken(ctx context.Context) (APITokenVerifyBody, error) { + res, err := api.makeRequestContext(ctx, http.MethodGet, "/user/tokens/verify", nil) + if err != nil { + return APITokenVerifyBody{}, err + } + + var apiTokenVerifyResponse APITokenVerifyResponse + err = json.Unmarshal(res, &apiTokenVerifyResponse) + if err != nil { + return APITokenVerifyBody{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return apiTokenVerifyResponse.Result, nil +} + +// DeleteAPIToken deletes a single API token. +// +// API reference: https://api.cloudflare.com/#user-api-tokens-delete-token +func (api *API) DeleteAPIToken(ctx context.Context, tokenID string) error { + _, err := api.makeRequestContext(ctx, http.MethodDelete, "/user/tokens/"+tokenID, nil) + if err != nil { + return err + } + + return nil +} + +// ListAPITokensPermissionGroups returns all available API token permission groups. 
+// +// API reference: https://api.cloudflare.com/#permission-groups-list-permission-groups +func (api *API) ListAPITokensPermissionGroups(ctx context.Context) ([]APITokenPermissionGroups, error) { + var r APITokenPermissionGroupsResponse + res, err := api.makeRequestContext(ctx, http.MethodGet, "/user/tokens/permission_groups", nil) + if err != nil { + return []APITokenPermissionGroups{}, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []APITokenPermissionGroups{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/argo.go b/vendor/github.com/cloudflare/cloudflare-go/argo.go new file mode 100644 index 0000000..d789257 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/argo.go @@ -0,0 +1,121 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var validSettingValues = []string{"on", "off"} + +// ArgoFeatureSetting is the structure of the API object for the +// argo smart routing and tiered caching settings. +type ArgoFeatureSetting struct { + Editable bool `json:"editable,omitempty"` + ID string `json:"id,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + Value string `json:"value"` +} + +// ArgoDetailsResponse is the API response for the argo smart routing +// and tiered caching response. +type ArgoDetailsResponse struct { + Result ArgoFeatureSetting `json:"result"` + Response +} + +// ArgoSmartRouting returns the current settings for smart routing. +// +// API reference: https://api.cloudflare.com/#argo-smart-routing-get-argo-smart-routing-setting +func (api *API) ArgoSmartRouting(ctx context.Context, zoneID string) (ArgoFeatureSetting, error) { + uri := fmt.Sprintf("/zones/%s/argo/smart_routing", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ArgoFeatureSetting{}, err + } + + var argoDetailsResponse ArgoDetailsResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return ArgoFeatureSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// UpdateArgoSmartRouting updates the setting for smart routing. +// +// API reference: https://api.cloudflare.com/#argo-smart-routing-patch-argo-smart-routing-setting +func (api *API) UpdateArgoSmartRouting(ctx context.Context, zoneID, settingValue string) (ArgoFeatureSetting, error) { + if !contains(validSettingValues, settingValue) { + return ArgoFeatureSetting{}, fmt.Errorf("invalid setting value '%s'. must be 'on' or 'off'", settingValue) + } + + uri := fmt.Sprintf("/zones/%s/argo/smart_routing", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, ArgoFeatureSetting{Value: settingValue}) + if err != nil { + return ArgoFeatureSetting{}, err + } + + var argoDetailsResponse ArgoDetailsResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return ArgoFeatureSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// ArgoTieredCaching returns the current settings for tiered caching. +// +// API reference: TBA. 
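+//
+// Illustrative usage (the zone ID is a placeholder):
+//
+//	setting, err := api.ArgoTieredCaching(ctx, "<zone id>")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("tiered caching:", setting.Value) // "on" or "off"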
+func (api *API) ArgoTieredCaching(ctx context.Context, zoneID string) (ArgoFeatureSetting, error) { + uri := fmt.Sprintf("/zones/%s/argo/tiered_caching", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ArgoFeatureSetting{}, err + } + + var argoDetailsResponse ArgoDetailsResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return ArgoFeatureSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// UpdateArgoTieredCaching updates the setting for tiered caching. +// +// API reference: TBA. +func (api *API) UpdateArgoTieredCaching(ctx context.Context, zoneID, settingValue string) (ArgoFeatureSetting, error) { + if !contains(validSettingValues, settingValue) { + return ArgoFeatureSetting{}, fmt.Errorf("invalid setting value '%s'. must be 'on' or 'off'", settingValue) + } + + uri := fmt.Sprintf("/zones/%s/argo/tiered_caching", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, ArgoFeatureSetting{Value: settingValue}) + if err != nil { + return ArgoFeatureSetting{}, err + } + + var argoDetailsResponse ArgoDetailsResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return ArgoFeatureSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/argo_tunnel.go b/vendor/github.com/cloudflare/cloudflare-go/argo_tunnel.go new file mode 100644 index 0000000..fa444f0 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/argo_tunnel.go @@ -0,0 +1,162 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// ArgoTunnel is the struct definition of a tunnel. +type ArgoTunnel struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Secret string `json:"tunnel_secret,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` + Connections []ArgoTunnelConnection `json:"connections,omitempty"` +} + +// ArgoTunnelConnection represents the connections associated with a tunnel. +type ArgoTunnelConnection struct { + ColoName string `json:"colo_name"` + UUID string `json:"uuid"` + IsPendingReconnect bool `json:"is_pending_reconnect"` +} + +// ArgoTunnelsDetailResponse is used for representing the API response payload for +// multiple tunnels. +type ArgoTunnelsDetailResponse struct { + Result []ArgoTunnel `json:"result"` + Response +} + +// ArgoTunnelDetailResponse is used for representing the API response payload for +// a single tunnel. +type ArgoTunnelDetailResponse struct { + Result ArgoTunnel `json:"result"` + Response +} + +// ArgoTunnels lists all tunnels. +// +// API reference: https://api.cloudflare.com/#argo-tunnel-list-argo-tunnels +// +// Deprecated: Use `Tunnels` instead. 
+func (api *API) ArgoTunnels(ctx context.Context, accountID string) ([]ArgoTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel", accountID) + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodGet, uri, nil, argoV1Header()) + if err != nil { + return []ArgoTunnel{}, err + } + + var argoDetailsResponse ArgoTunnelsDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return []ArgoTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// ArgoTunnel returns a single Argo tunnel. +// +// API reference: https://api.cloudflare.com/#argo-tunnel-get-argo-tunnel +// +// Deprecated: Use `Tunnel` instead. +func (api *API) ArgoTunnel(ctx context.Context, accountID, tunnelUUID string) (ArgoTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s", accountID, tunnelUUID) + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodGet, uri, nil, argoV1Header()) + if err != nil { + return ArgoTunnel{}, err + } + + var argoDetailsResponse ArgoTunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return ArgoTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// CreateArgoTunnel creates a new tunnel for the account. +// +// API reference: https://api.cloudflare.com/#argo-tunnel-create-argo-tunnel +// +// Deprecated: Use `CreateTunnel` instead. +func (api *API) CreateArgoTunnel(ctx context.Context, accountID, name, secret string) (ArgoTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel", accountID) + + tunnel := ArgoTunnel{Name: name, Secret: secret} + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, tunnel, argoV1Header()) + if err != nil { + return ArgoTunnel{}, err + } + + var argoDetailsResponse ArgoTunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return ArgoTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return argoDetailsResponse.Result, nil +} + +// DeleteArgoTunnel removes a single Argo tunnel. +// +// API reference: https://api.cloudflare.com/#argo-tunnel-delete-argo-tunnel +// +// Deprecated: Use `DeleteTunnel` instead. +func (api *API) DeleteArgoTunnel(ctx context.Context, accountID, tunnelUUID string) error { + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s", accountID, tunnelUUID) + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodDelete, uri, nil, argoV1Header()) + if err != nil { + return err + } + + var argoDetailsResponse ArgoTunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// CleanupArgoTunnelConnections deletes any inactive connections on a tunnel. +// +// API reference: https://api.cloudflare.com/#argo-tunnel-clean-up-argo-tunnel-connections +// +// Deprecated: Use `CleanupTunnelConnections` instead. 
+func (api *API) CleanupArgoTunnelConnections(ctx context.Context, accountID, tunnelUUID string) error { + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s/connections", accountID, tunnelUUID) + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodDelete, uri, nil, argoV1Header()) + if err != nil { + return err + } + + var argoDetailsResponse ArgoTunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// The early implementation of Argo Tunnel endpoints didn't conform to the V4 +// API standard response structure. This has been remedied going forward however +// to support older clients this isn't yet the default. An explicit `Accept` +// header is used to get the V4 compatible version. +func argoV1Header() http.Header { + header := make(http.Header) + header.Set("Accept", "application/json;version=1") + + return header +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/auditlogs.go b/vendor/github.com/cloudflare/cloudflare-go/auditlogs.go new file mode 100644 index 0000000..c21398a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/auditlogs.go @@ -0,0 +1,158 @@ +package cloudflare + +import ( + "context" + "net/http" + "net/url" + "path" + "strconv" + "time" + + "github.com/goccy/go-json" +) + +// AuditLogAction is a member of AuditLog, the action that was taken. +type AuditLogAction struct { + Result bool `json:"result"` + Type string `json:"type"` +} + +// AuditLogActor is a member of AuditLog, who performed the action. +type AuditLogActor struct { + Email string `json:"email"` + ID string `json:"id"` + IP string `json:"ip"` + Type string `json:"type"` +} + +// AuditLogOwner is a member of AuditLog, who owns this audit log. +type AuditLogOwner struct { + ID string `json:"id"` +} + +// AuditLogResource is a member of AuditLog, what was the action performed on. +type AuditLogResource struct { + ID string `json:"id"` + Type string `json:"type"` +} + +// AuditLog is an resource that represents an update in the cloudflare dash. +type AuditLog struct { + Action AuditLogAction `json:"action"` + Actor AuditLogActor `json:"actor"` + ID string `json:"id"` + Metadata map[string]interface{} `json:"metadata"` + NewValue string `json:"newValue"` + NewValueJSON map[string]interface{} `json:"newValueJson"` + OldValue string `json:"oldValue"` + OldValueJSON map[string]interface{} `json:"oldValueJson"` + Owner AuditLogOwner `json:"owner"` + Resource AuditLogResource `json:"resource"` + When time.Time `json:"when"` +} + +// AuditLogResponse is the response returned from the cloudflare v4 api. +type AuditLogResponse struct { + Response Response + Result []AuditLog `json:"result"` + ResultInfo `json:"result_info"` +} + +// AuditLogFilter is an object for filtering the audit log response from the api. +type AuditLogFilter struct { + ID string + ActorIP string + ActorEmail string + HideUserLogs bool + Direction string + ZoneName string + Since string + Before string + PerPage int + Page int +} + +// ToQuery turns an audit log filter in to an HTTP Query Param +// list, suitable for use in a url.URL.RawQuery. It will not include empty +// members of the struct in the query parameters. 
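+//
+// For example (arbitrary filter values):
+//
+//	filter := cloudflare.AuditLogFilter{ActorEmail: "alice@example.com", PerPage: 50, Page: 2}
+//	query := filter.ToQuery().Encode()
+//	// query == "actor.email=alice%40example.com&page=2&per_page=50"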
+func (a AuditLogFilter) ToQuery() url.Values { + v := url.Values{} + + if a.ID != "" { + v.Add("id", a.ID) + } + if a.ActorIP != "" { + v.Add("actor.ip", a.ActorIP) + } + if a.ActorEmail != "" { + v.Add("actor.email", a.ActorEmail) + } + if a.HideUserLogs { + v.Add("hide_user_logs", "true") + } + if a.ZoneName != "" { + v.Add("zone.name", a.ZoneName) + } + if a.Direction != "" { + v.Add("direction", a.Direction) + } + if a.Since != "" { + v.Add("since", a.Since) + } + if a.Before != "" { + v.Add("before", a.Before) + } + if a.PerPage > 0 { + v.Add("per_page", strconv.Itoa(a.PerPage)) + } + if a.Page > 0 { + v.Add("page", strconv.Itoa(a.Page)) + } + + return v +} + +// GetOrganizationAuditLogs will return the audit logs of a specific +// organization, based on the ID passed in. The audit logs can be +// filtered based on any argument in the AuditLogFilter. +// +// API Reference: https://api.cloudflare.com/#audit-logs-list-organization-audit-logs +func (api *API) GetOrganizationAuditLogs(ctx context.Context, organizationID string, a AuditLogFilter) (AuditLogResponse, error) { + uri := url.URL{ + Path: path.Join("/accounts", organizationID, "audit_logs"), + ForceQuery: true, + RawQuery: a.ToQuery().Encode(), + } + res, err := api.makeRequestContext(ctx, http.MethodGet, uri.String(), nil) + if err != nil { + return AuditLogResponse{}, err + } + return unmarshalReturn(res) +} + +// unmarshalReturn will unmarshal bytes and return an auditlogresponse. +func unmarshalReturn(res []byte) (AuditLogResponse, error) { + var auditResponse AuditLogResponse + err := json.Unmarshal(res, &auditResponse) + if err != nil { + return auditResponse, err + } + return auditResponse, nil +} + +// GetUserAuditLogs will return your user's audit logs. The audit logs can be +// filtered based on any argument in the AuditLogFilter. +// +// API Reference: https://api.cloudflare.com/#audit-logs-list-user-audit-logs +func (api *API) GetUserAuditLogs(ctx context.Context, a AuditLogFilter) (AuditLogResponse, error) { + uri := url.URL{ + Path: path.Join("/user", "audit_logs"), + ForceQuery: true, + RawQuery: a.ToQuery().Encode(), + } + res, err := api.makeRequestContext(ctx, http.MethodGet, uri.String(), nil) + if err != nil { + return AuditLogResponse{}, err + } + return unmarshalReturn(res) +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls.go b/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls.go new file mode 100644 index 0000000..ff46549 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls.go @@ -0,0 +1,67 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// AuthenticatedOriginPulls represents global AuthenticatedOriginPulls (tls_client_auth) metadata. +type AuthenticatedOriginPulls struct { + ID string `json:"id"` + Value string `json:"value"` + Editable bool `json:"editable"` + ModifiedOn time.Time `json:"modified_on"` +} + +// AuthenticatedOriginPullsResponse represents the response from the global AuthenticatedOriginPulls (tls_client_auth) details endpoint. +type AuthenticatedOriginPullsResponse struct { + Response + Result AuthenticatedOriginPulls `json:"result"` +} + +// GetAuthenticatedOriginPullsStatus returns the configuration details for global AuthenticatedOriginPulls (tls_client_auth). 
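+//
+// Illustrative sketch (zoneID is assumed to hold a valid zone identifier):
+//
+//	setting, err := api.GetAuthenticatedOriginPullsStatus(ctx, zoneID)
+//	if err != nil {
+//		return err
+//	}
+//	enabled := setting.Value == "on"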
+// +// API reference: https://api.cloudflare.com/#zone-settings-get-tls-client-auth-setting +func (api *API) GetAuthenticatedOriginPullsStatus(ctx context.Context, zoneID string) (AuthenticatedOriginPulls, error) { + uri := fmt.Sprintf("/zones/%s/settings/tls_client_auth", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AuthenticatedOriginPulls{}, err + } + var r AuthenticatedOriginPullsResponse + if err := json.Unmarshal(res, &r); err != nil { + return AuthenticatedOriginPulls{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// SetAuthenticatedOriginPullsStatus toggles whether global AuthenticatedOriginPulls is enabled for the zone. +// +// API reference: https://api.cloudflare.com/#zone-settings-change-tls-client-auth-setting +func (api *API) SetAuthenticatedOriginPullsStatus(ctx context.Context, zoneID string, enable bool) (AuthenticatedOriginPulls, error) { + uri := fmt.Sprintf("/zones/%s/settings/tls_client_auth", zoneID) + var val string + if enable { + val = "on" + } else { + val = "off" + } + params := struct { + Value string `json:"value"` + }{ + Value: val, + } + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return AuthenticatedOriginPulls{}, err + } + var r AuthenticatedOriginPullsResponse + if err := json.Unmarshal(res, &r); err != nil { + return AuthenticatedOriginPulls{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_hostname.go b/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_hostname.go new file mode 100644 index 0000000..c8ea3ec --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_hostname.go @@ -0,0 +1,175 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// PerHostnameAuthenticatedOriginPullsCertificateDetails represents the metadata for a Per Hostname AuthenticatedOriginPulls certificate. +type PerHostnameAuthenticatedOriginPullsCertificateDetails struct { + ID string `json:"id"` + Certificate string `json:"certificate"` + Issuer string `json:"issuer"` + Signature string `json:"signature"` + SerialNumber string `json:"serial_number"` + ExpiresOn time.Time `json:"expires_on"` + Status string `json:"status"` + UploadedOn time.Time `json:"uploaded_on"` +} + +// PerHostnameAuthenticatedOriginPullsCertificateResponse represents the response from endpoints relating to creating and deleting a Per Hostname AuthenticatedOriginPulls certificate. +type PerHostnameAuthenticatedOriginPullsCertificateResponse struct { + Response + Result PerHostnameAuthenticatedOriginPullsCertificateDetails `json:"result"` +} + +// PerHostnameAuthenticatedOriginPullsDetails contains metadata about the Per Hostname AuthenticatedOriginPulls configuration on a hostname. 
+type PerHostnameAuthenticatedOriginPullsDetails struct { + Hostname string `json:"hostname"` + CertID string `json:"cert_id"` + Enabled bool `json:"enabled"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + CertStatus string `json:"cert_status"` + Issuer string `json:"issuer"` + Signature string `json:"signature"` + SerialNumber string `json:"serial_number"` + Certificate string `json:"certificate"` + CertUploadedOn time.Time `json:"cert_uploaded_on"` + CertUpdatedAt time.Time `json:"cert_updated_at"` + ExpiresOn time.Time `json:"expires_on"` +} + +// PerHostnameAuthenticatedOriginPullsDetailsResponse represents Per Hostname AuthenticatedOriginPulls configuration metadata for a single hostname. +type PerHostnameAuthenticatedOriginPullsDetailsResponse struct { + Response + Result PerHostnameAuthenticatedOriginPullsDetails `json:"result"` +} + +// PerHostnamesAuthenticatedOriginPullsDetailsResponse represents Per Hostname AuthenticatedOriginPulls configuration metadata for multiple hostnames. +type PerHostnamesAuthenticatedOriginPullsDetailsResponse struct { + Response + Result []PerHostnameAuthenticatedOriginPullsDetails `json:"result"` +} + +// PerHostnameAuthenticatedOriginPullsCertificateParams represents the required data related to the client certificate being uploaded to be used in Per Hostname AuthenticatedOriginPulls. +type PerHostnameAuthenticatedOriginPullsCertificateParams struct { + Certificate string `json:"certificate"` + PrivateKey string `json:"private_key"` +} + +// PerHostnameAuthenticatedOriginPullsConfig represents the config state for Per Hostname AuthenticatedOriginPulls applied on a hostname. +type PerHostnameAuthenticatedOriginPullsConfig struct { + Hostname string `json:"hostname"` + CertID string `json:"cert_id"` + Enabled bool `json:"enabled"` +} + +// PerHostnameAuthenticatedOriginPullsConfigParams represents the expected config param format for Per Hostname AuthenticatedOriginPulls applied on a hostname. +type PerHostnameAuthenticatedOriginPullsConfigParams struct { + Config []PerHostnameAuthenticatedOriginPullsConfig `json:"config"` +} + +// ListPerHostnameAuthenticatedOriginPullsCertificates will get all certificate under Per Hostname AuthenticatedOriginPulls zone. +// +// API reference: https://api.cloudflare.com/#per-hostname-authenticated-origin-pull-list-certificates +func (api *API) ListPerHostnameAuthenticatedOriginPullsCertificates(ctx context.Context, zoneID string) ([]PerHostnameAuthenticatedOriginPullsDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/hostnames/certificates", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PerHostnameAuthenticatedOriginPullsDetails{}, err + } + var r PerHostnamesAuthenticatedOriginPullsDetailsResponse + if err := json.Unmarshal(res, &r); err != nil { + return []PerHostnameAuthenticatedOriginPullsDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UploadPerHostnameAuthenticatedOriginPullsCertificate will upload the provided certificate and private key to the edge under Per Hostname AuthenticatedOriginPulls. 
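+//
+// A hedged sketch; the PEM blocks are placeholders:
+//
+//	cert, err := api.UploadPerHostnameAuthenticatedOriginPullsCertificate(ctx, zoneID,
+//		cloudflare.PerHostnameAuthenticatedOriginPullsCertificateParams{
+//			Certificate: "-----BEGIN CERTIFICATE-----\n...",
+//			PrivateKey:  "-----BEGIN PRIVATE KEY-----\n...",
+//		})
+//	if err != nil {
+//		return err
+//	}
+//	// cert.ID can then be enabled on a hostname via
+//	// EditPerHostnameAuthenticatedOriginPullsConfig below.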
+// +// API reference: https://api.cloudflare.com/#per-hostname-authenticated-origin-pull-upload-a-hostname-client-certificate +func (api *API) UploadPerHostnameAuthenticatedOriginPullsCertificate(ctx context.Context, zoneID string, params PerHostnameAuthenticatedOriginPullsCertificateParams) (PerHostnameAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/hostnames/certificates", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return PerHostnameAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerHostnameAuthenticatedOriginPullsCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerHostnameAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetPerHostnameAuthenticatedOriginPullsCertificate retrieves certificate metadata about the requested Per Hostname certificate. +// +// API reference: https://api.cloudflare.com/#per-hostname-authenticated-origin-pull-get-the-hostname-client-certificate +func (api *API) GetPerHostnameAuthenticatedOriginPullsCertificate(ctx context.Context, zoneID, certificateID string) (PerHostnameAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/hostnames/certificates/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PerHostnameAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerHostnameAuthenticatedOriginPullsCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerHostnameAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeletePerHostnameAuthenticatedOriginPullsCertificate will remove the requested Per Hostname certificate from the edge. +// +// API reference: https://api.cloudflare.com/#per-hostname-authenticated-origin-pull-delete-hostname-client-certificate +func (api *API) DeletePerHostnameAuthenticatedOriginPullsCertificate(ctx context.Context, zoneID, certificateID string) (PerHostnameAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/hostnames/certificates/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return PerHostnameAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerHostnameAuthenticatedOriginPullsCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerHostnameAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// EditPerHostnameAuthenticatedOriginPullsConfig applies the supplied Per Hostname AuthenticatedOriginPulls config onto a hostname(s) in the edge. 
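+//
+// Illustrative sketch (the hostname and certificate ID are placeholders):
+//
+//	_, err := api.EditPerHostnameAuthenticatedOriginPullsConfig(ctx, zoneID,
+//		[]cloudflare.PerHostnameAuthenticatedOriginPullsConfig{{
+//			Hostname: "app.example.com",
+//			CertID:   "<certificate id>",
+//			Enabled:  true,
+//		}})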
+// +// API reference: https://api.cloudflare.com/#per-hostname-authenticated-origin-pull-enable-or-disable-a-hostname-for-client-authentication +func (api *API) EditPerHostnameAuthenticatedOriginPullsConfig(ctx context.Context, zoneID string, config []PerHostnameAuthenticatedOriginPullsConfig) ([]PerHostnameAuthenticatedOriginPullsDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/hostnames", zoneID) + conf := PerHostnameAuthenticatedOriginPullsConfigParams{ + Config: config, + } + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, conf) + if err != nil { + return []PerHostnameAuthenticatedOriginPullsDetails{}, err + } + var r PerHostnamesAuthenticatedOriginPullsDetailsResponse + if err := json.Unmarshal(res, &r); err != nil { + return []PerHostnameAuthenticatedOriginPullsDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetPerHostnameAuthenticatedOriginPullsConfig returns the config state of Per Hostname AuthenticatedOriginPulls of the provided hostname within a zone. +// +// API reference: https://api.cloudflare.com/#per-hostname-authenticated-origin-pull-get-the-hostname-status-for-client-authentication +func (api *API) GetPerHostnameAuthenticatedOriginPullsConfig(ctx context.Context, zoneID, hostname string) (PerHostnameAuthenticatedOriginPullsDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/hostnames/%s", zoneID, hostname) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PerHostnameAuthenticatedOriginPullsDetails{}, err + } + var r PerHostnameAuthenticatedOriginPullsDetailsResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerHostnameAuthenticatedOriginPullsDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_zone.go b/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_zone.go new file mode 100644 index 0000000..a72b34e --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/authenticated_origin_pulls_per_zone.go @@ -0,0 +1,151 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// PerZoneAuthenticatedOriginPullsSettings represents the settings for Per Zone AuthenticatedOriginPulls. +type PerZoneAuthenticatedOriginPullsSettings struct { + Enabled bool `json:"enabled"` +} + +// PerZoneAuthenticatedOriginPullsSettingsResponse represents the response from the Per Zone AuthenticatedOriginPulls settings endpoint. +type PerZoneAuthenticatedOriginPullsSettingsResponse struct { + Response + Result PerZoneAuthenticatedOriginPullsSettings `json:"result"` +} + +// PerZoneAuthenticatedOriginPullsCertificateDetails represents the metadata for a Per Zone AuthenticatedOriginPulls client certificate. +type PerZoneAuthenticatedOriginPullsCertificateDetails struct { + ID string `json:"id"` + Certificate string `json:"certificate"` + Issuer string `json:"issuer"` + Signature string `json:"signature"` + ExpiresOn time.Time `json:"expires_on"` + Status string `json:"status"` + UploadedOn time.Time `json:"uploaded_on"` +} + +// PerZoneAuthenticatedOriginPullsCertificateResponse represents the response from endpoints relating to creating and deleting a per zone AuthenticatedOriginPulls certificate. 
+type PerZoneAuthenticatedOriginPullsCertificateResponse struct { + Response + Result PerZoneAuthenticatedOriginPullsCertificateDetails `json:"result"` +} + +// PerZoneAuthenticatedOriginPullsCertificatesResponse represents the response from the per zone AuthenticatedOriginPulls certificate list endpoint. +type PerZoneAuthenticatedOriginPullsCertificatesResponse struct { + Response + Result []PerZoneAuthenticatedOriginPullsCertificateDetails `json:"result"` +} + +// PerZoneAuthenticatedOriginPullsCertificateParams represents the required data related to the client certificate being uploaded to be used in Per Zone AuthenticatedOriginPulls. +type PerZoneAuthenticatedOriginPullsCertificateParams struct { + Certificate string `json:"certificate"` + PrivateKey string `json:"private_key"` +} + +// GetPerZoneAuthenticatedOriginPullsStatus returns whether per zone AuthenticatedOriginPulls is enabled or not. It is false by default. +// +// API reference: https://api.cloudflare.com/#zone-level-authenticated-origin-pulls-get-enablement-setting-for-zone +func (api *API) GetPerZoneAuthenticatedOriginPullsStatus(ctx context.Context, zoneID string) (PerZoneAuthenticatedOriginPullsSettings, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/settings", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PerZoneAuthenticatedOriginPullsSettings{}, err + } + var r PerZoneAuthenticatedOriginPullsSettingsResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerZoneAuthenticatedOriginPullsSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// SetPerZoneAuthenticatedOriginPullsStatus will update whether Per Zone AuthenticatedOriginPulls is enabled for the zone. +// +// API reference: https://api.cloudflare.com/#zone-level-authenticated-origin-pulls-set-enablement-for-zone +func (api *API) SetPerZoneAuthenticatedOriginPullsStatus(ctx context.Context, zoneID string, enable bool) (PerZoneAuthenticatedOriginPullsSettings, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/settings", zoneID) + params := struct { + Enabled bool `json:"enabled"` + }{ + Enabled: enable, + } + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return PerZoneAuthenticatedOriginPullsSettings{}, err + } + var r PerZoneAuthenticatedOriginPullsSettingsResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerZoneAuthenticatedOriginPullsSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UploadPerZoneAuthenticatedOriginPullsCertificate will upload a provided client certificate and enable it to be used in all AuthenticatedOriginPulls requests for the zone. 
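+//
+// A hedged sketch pairing the upload with the zone-level enablement setting
+// (the PEM blocks are placeholders):
+//
+//	_, err := api.UploadPerZoneAuthenticatedOriginPullsCertificate(ctx, zoneID,
+//		cloudflare.PerZoneAuthenticatedOriginPullsCertificateParams{
+//			Certificate: "-----BEGIN CERTIFICATE-----\n...",
+//			PrivateKey:  "-----BEGIN PRIVATE KEY-----\n...",
+//		})
+//	if err == nil {
+//		_, err = api.SetPerZoneAuthenticatedOriginPullsStatus(ctx, zoneID, true)
+//	}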
+// +// API reference: https://api.cloudflare.com/#zone-level-authenticated-origin-pulls-upload-certificate +func (api *API) UploadPerZoneAuthenticatedOriginPullsCertificate(ctx context.Context, zoneID string, params PerZoneAuthenticatedOriginPullsCertificateParams) (PerZoneAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return PerZoneAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerZoneAuthenticatedOriginPullsCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerZoneAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListPerZoneAuthenticatedOriginPullsCertificates returns a list of all user uploaded client certificates to Per Zone AuthenticatedOriginPulls. +// +// API reference: https://api.cloudflare.com/#zone-level-authenticated-origin-pulls-list-certificates +func (api *API) ListPerZoneAuthenticatedOriginPullsCertificates(ctx context.Context, zoneID string) ([]PerZoneAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PerZoneAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerZoneAuthenticatedOriginPullsCertificatesResponse + if err := json.Unmarshal(res, &r); err != nil { + return []PerZoneAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetPerZoneAuthenticatedOriginPullsCertificateDetails returns the metadata associated with a user uploaded client certificate to Per Zone AuthenticatedOriginPulls. +// +// API reference: https://api.cloudflare.com/#zone-level-authenticated-origin-pulls-get-certificate-details +func (api *API) GetPerZoneAuthenticatedOriginPullsCertificateDetails(ctx context.Context, zoneID, certificateID string) (PerZoneAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PerZoneAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerZoneAuthenticatedOriginPullsCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerZoneAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeletePerZoneAuthenticatedOriginPullsCertificate removes the specified client certificate from the edge. 
+// +// API reference: https://api.cloudflare.com/#zone-level-authenticated-origin-pulls-delete-certificate +func (api *API) DeletePerZoneAuthenticatedOriginPullsCertificate(ctx context.Context, zoneID, certificateID string) (PerZoneAuthenticatedOriginPullsCertificateDetails, error) { + uri := fmt.Sprintf("/zones/%s/origin_tls_client_auth/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return PerZoneAuthenticatedOriginPullsCertificateDetails{}, err + } + var r PerZoneAuthenticatedOriginPullsCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return PerZoneAuthenticatedOriginPullsCertificateDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/bot_management.go b/vendor/github.com/cloudflare/cloudflare-go/bot_management.go new file mode 100644 index 0000000..51205a6 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/bot_management.go @@ -0,0 +1,93 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// BotManagement represents the bots config for a zone. +type BotManagement struct { + EnableJS *bool `json:"enable_js,omitempty"` + FightMode *bool `json:"fight_mode,omitempty"` + SBFMDefinitelyAutomated *string `json:"sbfm_definitely_automated,omitempty"` + SBFMLikelyAutomated *string `json:"sbfm_likely_automated,omitempty"` + SBFMVerifiedBots *string `json:"sbfm_verified_bots,omitempty"` + SBFMStaticResourceProtection *bool `json:"sbfm_static_resource_protection,omitempty"` + OptimizeWordpress *bool `json:"optimize_wordpress,omitempty"` + SuppressSessionScore *bool `json:"suppress_session_score,omitempty"` + AutoUpdateModel *bool `json:"auto_update_model,omitempty"` + UsingLatestModel *bool `json:"using_latest_model,omitempty"` + AIBotsProtection *string `json:"ai_bots_protection,omitempty"` +} + +// BotManagementResponse represents the response from the bot_management endpoint. +type BotManagementResponse struct { + Result BotManagement `json:"result"` + Response +} + +type UpdateBotManagementParams struct { + EnableJS *bool `json:"enable_js,omitempty"` + FightMode *bool `json:"fight_mode,omitempty"` + SBFMDefinitelyAutomated *string `json:"sbfm_definitely_automated,omitempty"` + SBFMLikelyAutomated *string `json:"sbfm_likely_automated,omitempty"` + SBFMVerifiedBots *string `json:"sbfm_verified_bots,omitempty"` + SBFMStaticResourceProtection *bool `json:"sbfm_static_resource_protection,omitempty"` + OptimizeWordpress *bool `json:"optimize_wordpress,omitempty"` + SuppressSessionScore *bool `json:"suppress_session_score,omitempty"` + AutoUpdateModel *bool `json:"auto_update_model,omitempty"` + AIBotsProtection *string `json:"ai_bots_protection,omitempty"` +} + +// GetBotManagement gets a zone API shield configuration. 
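+//
+// Illustrative sketch, assuming the ZoneIdentifier helper from resource.go:
+//
+//	bm, err := api.GetBotManagement(ctx, cloudflare.ZoneIdentifier(zoneID))
+//	if err != nil {
+//		return err
+//	}
+//	if bm.FightMode != nil && *bm.FightMode {
+//		// bot fight mode is enabled
+//	}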
+// +// API documentation: https://developers.cloudflare.com/api/operations/bot-management-for-a-zone-get-config +func (api *API) GetBotManagement(ctx context.Context, rc *ResourceContainer) (BotManagement, error) { + uri := fmt.Sprintf("/zones/%s/bot_management", rc.Identifier) + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodGet, uri, nil, botV2Header()) + if err != nil { + return BotManagement{}, err + } + var bmResponse BotManagementResponse + err = json.Unmarshal(res, &bmResponse) + if err != nil { + return BotManagement{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return bmResponse.Result, nil +} + +// UpdateBotManagement sets a zone API shield configuration. +// +// API documentation: https://developers.cloudflare.com/api/operations/bot-management-for-a-zone-update-config +func (api *API) UpdateBotManagement(ctx context.Context, rc *ResourceContainer, params UpdateBotManagementParams) (BotManagement, error) { + uri := fmt.Sprintf("/zones/%s/bot_management", rc.Identifier) + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPut, uri, params, botV2Header()) + if err != nil { + return BotManagement{}, err + } + + var bmResponse BotManagementResponse + err = json.Unmarshal(res, &bmResponse) + if err != nil { + return BotManagement{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return bmResponse.Result, nil +} + +// We are currently undergoing the process of updating the bot management API. +// The older 1.0.0 version of the is still the default version, so we will need +// to explicitly set this special header on all requests. We will eventually +// make 2.0.0 the default version, and later we will remove the 1.0.0 entirely. +func botV2Header() http.Header { + header := make(http.Header) + header.Set("Cloudflare-Version", "2.0.0") + + return header +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/cache_reserve.go b/vendor/github.com/cloudflare/cloudflare-go/cache_reserve.go new file mode 100644 index 0000000..64e74b3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/cache_reserve.go @@ -0,0 +1,83 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// CacheReserve is the structure of the API object for the cache reserve +// setting. +type CacheReserve struct { + ID string `json:"id,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + Value string `json:"value"` +} + +// CacheReserveDetailsResponse is the API response for the cache reserve +// setting. +type CacheReserveDetailsResponse struct { + Result CacheReserve `json:"result"` + Response +} + +type zoneCacheReserveSingleResponse struct { + Response + Result CacheReserve `json:"result"` +} + +type GetCacheReserveParams struct{} + +type UpdateCacheReserveParams struct { + Value string `json:"value"` +} + +// GetCacheReserve returns information about the current cache reserve settings. 
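+//
+// Illustrative sketch, assuming the ZoneIdentifier helper from resource.go:
+//
+//	cr, err := api.GetCacheReserve(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.GetCacheReserveParams{})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("cache reserve:", cr.Value) // "on" or "off"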
+// +// API reference: https://developers.cloudflare.com/api/operations/zone-cache-settings-get-cache-reserve-setting +func (api *API) GetCacheReserve(ctx context.Context, rc *ResourceContainer, params GetCacheReserveParams) (CacheReserve, error) { + if rc.Level != ZoneRouteLevel { + return CacheReserve{}, ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/cache/cache_reserve", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return CacheReserve{}, err + } + + var cacheReserveDetailsResponse CacheReserveDetailsResponse + err = json.Unmarshal(res, &cacheReserveDetailsResponse) + if err != nil { + return CacheReserve{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return cacheReserveDetailsResponse.Result, nil +} + +// UpdateCacheReserve updates the cache reserve setting for a zone +// +// API reference: https://developers.cloudflare.com/api/operations/zone-cache-settings-change-cache-reserve-setting +func (api *API) UpdateCacheReserve(ctx context.Context, rc *ResourceContainer, params UpdateCacheReserveParams) (CacheReserve, error) { + if rc.Level != ZoneRouteLevel { + return CacheReserve{}, ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/cache/cache_reserve", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return CacheReserve{}, err + } + + response := &zoneCacheReserveSingleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return CacheReserve{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/catalog.json b/vendor/github.com/cloudflare/cloudflare-go/catalog.json new file mode 100644 index 0000000..9784724 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/catalog.json @@ -0,0 +1,67 @@ +{ + "nixpkgs-flox": { + "x86_64-darwin": { + "stable": { + "go_1_20": { + "latest": { + "cache": { + "out": { + "https://cache.nixos.org": {} + } + }, + "element": { + "attrPath": [ + "legacyPackages", + "x86_64-darwin", + "go_1_20" + ], + "originalUrl": "flake:nixpkgs", + "storePaths": [ + "/nix/store/0pp0svlrkdls28dixb6a7kqa567gs59v-go-1.20.1" + ], + "url": "github:flox/nixpkgs/d0d55259081f0b97c828f38559cad899d351cad1" + }, + "eval": { + "meta": { + "description": "The Go Programming language", + "outputsToInstall": [ + "out" + ], + "position": "/nix/store/32cp7y0cf36h3xp0fqwy2nf432fpcaci-source/pkgs/development/compilers/go/1.20.nix:176", + "unfree": false + }, + "name": "go-1.20.1", + "outputs": { + "out": "/nix/store/0pp0svlrkdls28dixb6a7kqa567gs59v-go-1.20.1" + }, + "pname": "go", + "system": "x86_64-darwin", + "version": "1.20.1" + }, + "publish_element": { + "attrPath": [ + "evalCatalog", + "x86_64-darwin", + "stable", + "go_1_20" + ], + "originalUrl": "flake:nixpkgs-flox", + "storePaths": [ + "/nix/store/0pp0svlrkdls28dixb6a7kqa567gs59v-go-1.20.1" + ], + "url": "flake:nixpkgs-flox/b7e7e40e2aa1ca2a44db440f5dc52213564af02f" + }, + "source": { + "locked": { + "lastModified": 1676973346, + "revCount": 456418 + } + }, + "type": "catalogRender", + "version": 1 + } + } + } + } + } +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/certificate_packs.go b/vendor/github.com/cloudflare/cloudflare-go/certificate_packs.go new file mode 100644 index 0000000..2fee1d6 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/certificate_packs.go @@ -0,0 +1,163 @@ +package cloudflare + +import 
( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// CertificatePackGeoRestrictions is for the structure of the geographic +// restrictions for a TLS certificate. +type CertificatePackGeoRestrictions struct { + Label string `json:"label"` +} + +// CertificatePackCertificate is the base structure of a TLS certificate that is +// contained within a certificate pack. +type CertificatePackCertificate struct { + ID string `json:"id"` + Hosts []string `json:"hosts"` + Issuer string `json:"issuer"` + Signature string `json:"signature"` + Status string `json:"status"` + BundleMethod string `json:"bundle_method"` + GeoRestrictions CertificatePackGeoRestrictions `json:"geo_restrictions"` + ZoneID string `json:"zone_id"` + UploadedOn time.Time `json:"uploaded_on"` + ModifiedOn time.Time `json:"modified_on"` + ExpiresOn time.Time `json:"expires_on"` + Priority int `json:"priority"` +} + +// CertificatePack is the overarching structure of a certificate pack response. +type CertificatePack struct { + ID string `json:"id"` + Type string `json:"type"` + Hosts []string `json:"hosts"` + Certificates []CertificatePackCertificate `json:"certificates"` + PrimaryCertificate string `json:"primary_certificate"` + Status string `json:"status"` + ValidationRecords []SSLValidationRecord `json:"validation_records,omitempty"` + ValidationErrors []SSLValidationError `json:"validation_errors,omitempty"` + ValidationMethod string `json:"validation_method"` + ValidityDays int `json:"validity_days"` + CertificateAuthority string `json:"certificate_authority"` + CloudflareBranding bool `json:"cloudflare_branding"` +} + +// CertificatePackRequest is used for requesting a new certificate. +type CertificatePackRequest struct { + Type string `json:"type"` + Hosts []string `json:"hosts"` + ValidationMethod string `json:"validation_method"` + ValidityDays int `json:"validity_days"` + CertificateAuthority string `json:"certificate_authority"` + CloudflareBranding bool `json:"cloudflare_branding"` +} + +// CertificatePacksResponse is for responses where multiple certificates are +// expected. +type CertificatePacksResponse struct { + Response + Result []CertificatePack `json:"result"` +} + +// CertificatePacksDetailResponse contains a single certificate pack in the +// response. +type CertificatePacksDetailResponse struct { + Response + Result CertificatePack `json:"result"` +} + +// ListCertificatePacks returns all available TLS certificate packs for a zone. +// +// API Reference: https://api.cloudflare.com/#certificate-packs-list-certificate-packs +func (api *API) ListCertificatePacks(ctx context.Context, zoneID string) ([]CertificatePack, error) { + uri := fmt.Sprintf("/zones/%s/ssl/certificate_packs?status=all", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []CertificatePack{}, err + } + + var certificatePacksResponse CertificatePacksResponse + err = json.Unmarshal(res, &certificatePacksResponse) + if err != nil { + return []CertificatePack{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return certificatePacksResponse.Result, nil +} + +// CertificatePack returns a single TLS certificate pack on a zone. 
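+//
+// Illustrative usage (the pack ID is a placeholder):
+//
+//	pack, err := api.CertificatePack(ctx, zoneID, "<certificate pack id>")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(pack.Status, pack.Hosts)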
+// +// API Reference: https://api.cloudflare.com/#certificate-packs-get-certificate-pack +func (api *API) CertificatePack(ctx context.Context, zoneID, certificatePackID string) (CertificatePack, error) { + uri := fmt.Sprintf("/zones/%s/ssl/certificate_packs/%s", zoneID, certificatePackID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return CertificatePack{}, err + } + + var certificatePacksDetailResponse CertificatePacksDetailResponse + err = json.Unmarshal(res, &certificatePacksDetailResponse) + if err != nil { + return CertificatePack{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return certificatePacksDetailResponse.Result, nil +} + +// CreateCertificatePack creates a new certificate pack associated with a zone. +// +// API Reference: https://api.cloudflare.com/#certificate-packs-order-advanced-certificate-manager-certificate-pack +func (api *API) CreateCertificatePack(ctx context.Context, zoneID string, cert CertificatePackRequest) (CertificatePack, error) { + uri := fmt.Sprintf("/zones/%s/ssl/certificate_packs/order", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, cert) + if err != nil { + return CertificatePack{}, err + } + + var certificatePacksDetailResponse CertificatePacksDetailResponse + err = json.Unmarshal(res, &certificatePacksDetailResponse) + if err != nil { + return CertificatePack{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return certificatePacksDetailResponse.Result, nil +} + +// DeleteCertificatePack removes a certificate pack associated with a zone. +// +// API Reference: https://api.cloudflare.com/#certificate-packs-delete-advanced-certificate-manager-certificate-pack +func (api *API) DeleteCertificatePack(ctx context.Context, zoneID, certificateID string) error { + uri := fmt.Sprintf("/zones/%s/ssl/certificate_packs/%s", zoneID, certificateID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// RestartCertificateValidation kicks off the validation process for a +// pending certificate pack. 
+// +// API Reference: https://api.cloudflare.com/#certificate-packs-restart-validation-for-advanced-certificate-manager-certificate-pack +func (api *API) RestartCertificateValidation(ctx context.Context, zoneID, certificateID string) (CertificatePack, error) { + uri := fmt.Sprintf("/zones/%s/ssl/certificate_packs/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, nil) + if err != nil { + return CertificatePack{}, err + } + + var certificatePackResponse CertificatePacksDetailResponse + err = json.Unmarshal(res, &certificatePackResponse) + if err != nil { + return CertificatePack{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return certificatePackResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/cloud_connector.go b/vendor/github.com/cloudflare/cloudflare-go/cloud_connector.go new file mode 100644 index 0000000..8c6e3b9 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/cloud_connector.go @@ -0,0 +1,71 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type CloudConnectorRulesResponse struct { + Response + Result []CloudConnectorRule `json:"result"` +} + +type CloudConnectorRuleParameters struct { + Host string `json:"host"` +} + +type CloudConnectorRule struct { + ID string `json:"id"` + Enabled *bool `json:"enabled,omitempty"` + Expression string `json:"expression"` + Provider string `json:"provider"` + Parameters CloudConnectorRuleParameters `json:"parameters"` + Description string `json:"description"` +} + +func (api *API) ListZoneCloudConnectorRules(ctx context.Context, rc *ResourceContainer) ([]CloudConnectorRule, error) { + if rc.Identifier == "" { + return nil, ErrMissingZoneID + } + + uri := buildURI(fmt.Sprintf("/zones/%s/cloud_connector/rules", rc.Identifier), nil) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + result := CloudConnectorRulesResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +func (api *API) UpdateZoneCloudConnectorRules(ctx context.Context, rc *ResourceContainer, params []CloudConnectorRule) ([]CloudConnectorRule, error) { + if rc.Identifier == "" { + return nil, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/cloud_connector/rules", rc.Identifier) + + payload, err := json.Marshal(params) + if err != nil { + return nil, err + } + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, payload) + if err != nil { + return nil, err + } + + result := CloudConnectorRulesResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/cloudflare.go b/vendor/github.com/cloudflare/cloudflare-go/cloudflare.go new file mode 100644 index 0000000..b228981 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/cloudflare.go @@ -0,0 +1,593 @@ +// Package cloudflare implements the Cloudflare v4 API. +package cloudflare + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "math" + "net/http" + "net/http/httputil" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/goccy/go-json" + + "golang.org/x/time/rate" +) + +var ( + Version string = "v4" + + // Deprecated: Use `client.New` configuration instead. 
+ apiURL = fmt.Sprintf("%s://%s%s", defaultScheme, defaultHostname, defaultBasePath) +) + +const ( + // AuthKeyEmail specifies that we should authenticate with API key and email address. + AuthKeyEmail = 1 << iota + // AuthUserService specifies that we should authenticate with a User-Service key. + AuthUserService + // AuthToken specifies that we should authenticate with an API Token. + AuthToken +) + +// API holds the configuration for the current API client. A client should not +// be modified concurrently. +type API struct { + APIKey string + APIEmail string + APIUserServiceKey string + APIToken string + BaseURL string + UserAgent string + headers http.Header + httpClient *http.Client + authType int + rateLimiter *rate.Limiter + retryPolicy RetryPolicy + logger Logger + Debug bool +} + +// newClient provides shared logic for New and NewWithUserServiceKey. +func newClient(opts ...Option) (*API, error) { + silentLogger := log.New(io.Discard, "", log.LstdFlags) + + api := &API{ + BaseURL: fmt.Sprintf("%s://%s%s", defaultScheme, defaultHostname, defaultBasePath), + UserAgent: userAgent + "/" + Version, + headers: make(http.Header), + rateLimiter: rate.NewLimiter(rate.Limit(4), 1), // 4rps equates to default api limit (1200 req/5 min) + retryPolicy: RetryPolicy{ + MaxRetries: 3, + MinRetryDelay: 1 * time.Second, + MaxRetryDelay: 30 * time.Second, + }, + logger: silentLogger, + } + + err := api.parseOptions(opts...) + if err != nil { + return nil, fmt.Errorf("options parsing failed: %w", err) + } + + // Fall back to http.DefaultClient if the package user does not provide + // their own. + if api.httpClient == nil { + api.httpClient = http.DefaultClient + } + + return api, nil +} + +// New creates a new Cloudflare v4 API client. +func New(key, email string, opts ...Option) (*API, error) { + if key == "" || email == "" { + return nil, errors.New(errEmptyCredentials) + } + + api, err := newClient(opts...) + if err != nil { + return nil, err + } + + api.APIKey = key + api.APIEmail = email + api.authType = AuthKeyEmail + + return api, nil +} + +// NewWithAPIToken creates a new Cloudflare v4 API client using API Tokens. +func NewWithAPIToken(token string, opts ...Option) (*API, error) { + if token == "" { + return nil, errors.New(errEmptyAPIToken) + } + + api, err := newClient(opts...) + if err != nil { + return nil, err + } + + api.APIToken = token + api.authType = AuthToken + + return api, nil +} + +// NewWithUserServiceKey creates a new Cloudflare v4 API client using service key authentication. +func NewWithUserServiceKey(key string, opts ...Option) (*API, error) { + if key == "" { + return nil, errors.New(errEmptyCredentials) + } + + api, err := newClient(opts...) + if err != nil { + return nil, err + } + + api.APIUserServiceKey = key + api.authType = AuthUserService + + return api, nil +} + +// SetAuthType sets the authentication method (AuthKeyEmail, AuthToken, or AuthUserService). +func (api *API) SetAuthType(authType int) { + api.authType = authType +} + +// ZoneIDByName retrieves a zone's ID from the name. 
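+//
+// For example (the domain is a placeholder):
+//
+//	zoneID, err := api.ZoneIDByName("example.com")
+//	if err != nil {
+//		return err
+//	}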
+func (api *API) ZoneIDByName(zoneName string) (string, error) { + zoneName = normalizeZoneName(zoneName) + res, err := api.ListZonesContext(context.Background(), WithZoneFilters(zoneName, "", "")) + if err != nil { + return "", fmt.Errorf("ListZonesContext command failed: %w", err) + } + + switch len(res.Result) { + case 0: + return "", errors.New("zone could not be found") + case 1: + return res.Result[0].ID, nil + default: + return "", errors.New("ambiguous zone name; an account ID might help") + } +} + +// makeRequest makes a HTTP request and returns the body as a byte slice, +// closing it before returning. params will be serialized to JSON. +func (api *API) makeRequest(method, uri string, params interface{}) ([]byte, error) { + return api.makeRequestWithAuthType(context.Background(), method, uri, params, api.authType) +} + +func (api *API) makeRequestContext(ctx context.Context, method, uri string, params interface{}) ([]byte, error) { + return api.makeRequestWithAuthType(ctx, method, uri, params, api.authType) +} + +func (api *API) makeRequestContextWithHeaders(ctx context.Context, method, uri string, params interface{}, headers http.Header) ([]byte, error) { + return api.makeRequestWithAuthTypeAndHeaders(ctx, method, uri, params, api.authType, headers) +} + +func (api *API) makeRequestWithAuthType(ctx context.Context, method, uri string, params interface{}, authType int) ([]byte, error) { + return api.makeRequestWithAuthTypeAndHeaders(ctx, method, uri, params, authType, nil) +} + +// APIResponse holds the structure for a response from the API. It looks alot +// like `http.Response` however, uses a `[]byte` for the `Body` instead of a +// `io.ReadCloser`. +// +// This may go away in the experimental client in favour of `http.Response`. +type APIResponse struct { + Body []byte + Status string + StatusCode int + Headers http.Header +} + +func (api *API) makeRequestWithAuthTypeAndHeaders(ctx context.Context, method, uri string, params interface{}, authType int, headers http.Header) ([]byte, error) { + res, err := api.makeRequestWithAuthTypeAndHeadersComplete(ctx, method, uri, params, authType, headers) + if err != nil { + return nil, err + } + return res.Body, err +} + +// Use this method if an API response can have different Content-Type headers and different body formats. 
+func (api *API) makeRequestContextWithHeadersComplete(ctx context.Context, method, uri string, params interface{}, headers http.Header) (*APIResponse, error) { + return api.makeRequestWithAuthTypeAndHeadersComplete(ctx, method, uri, params, api.authType, headers) +} + +func (api *API) makeRequestWithAuthTypeAndHeadersComplete(ctx context.Context, method, uri string, params interface{}, authType int, headers http.Header) (*APIResponse, error) { + var err error + var resp *http.Response + var respErr error + var respBody []byte + + for i := 0; i <= api.retryPolicy.MaxRetries; i++ { + var reqBody io.Reader + if params != nil { + if r, ok := params.(io.Reader); ok { + reqBody = r + } else if paramBytes, ok := params.([]byte); ok { + reqBody = bytes.NewReader(paramBytes) + } else { + var jsonBody []byte + jsonBody, err = json.Marshal(params) + if err != nil { + return nil, fmt.Errorf("error marshalling params to JSON: %w", err) + } + reqBody = bytes.NewReader(jsonBody) + } + } + + if i > 0 { + // expect the backoff introduced here on errored requests to dominate the effect of rate limiting + // don't need a random component here as the rate limiter should do something similar + // nb time duration could truncate an arbitrary float. Since our inputs are all ints, we should be ok + sleepDuration := time.Duration(math.Pow(2, float64(i-1)) * float64(api.retryPolicy.MinRetryDelay)) + + if sleepDuration > api.retryPolicy.MaxRetryDelay { + sleepDuration = api.retryPolicy.MaxRetryDelay + } + // useful to do some simple logging here, maybe introduce levels later + api.logger.Printf("Sleeping %s before retry attempt number %d for request %s %s", sleepDuration.String(), i, method, uri) + + select { + case <-time.After(sleepDuration): + case <-ctx.Done(): + return nil, fmt.Errorf("operation aborted during backoff: %w", ctx.Err()) + } + } + + err = api.rateLimiter.Wait(ctx) + if err != nil { + return nil, fmt.Errorf("error caused by request rate limiting: %w", err) + } + + resp, respErr = api.request(ctx, method, uri, reqBody, authType, headers) + + // short circuit processing on context timeouts + if respErr != nil && errors.Is(respErr, context.DeadlineExceeded) { + return nil, respErr + } + + // retry if the server is rate limiting us or if it failed + // assumes server operations are rolled back on failure + if respErr != nil || resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode >= 500 { + if resp != nil && resp.StatusCode == http.StatusTooManyRequests { + respErr = errors.New("exceeded available rate limit retries") + } + + if respErr == nil { + respErr = fmt.Errorf("received %s response (HTTP %d), please try again later", strings.ToLower(http.StatusText(resp.StatusCode)), resp.StatusCode) + } + continue + } else { + respBody, err = io.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, fmt.Errorf("could not read response body: %w", err) + } + + break + } + } + + // still had an error after all retries + if respErr != nil { + return nil, respErr + } + + if resp.StatusCode >= http.StatusBadRequest { + if strings.HasSuffix(resp.Request.URL.Path, "/filters/validate-expr") { + return nil, fmt.Errorf("%s", respBody) + } + + if resp.StatusCode >= http.StatusInternalServerError { + return nil, &ServiceError{cloudflareError: &Error{ + StatusCode: resp.StatusCode, + RayID: resp.Header.Get("cf-ray"), + Errors: []ResponseInfo{{ + Message: errInternalServiceError, + }}, + }} + } + + errBody := &Response{} + err = json.Unmarshal(respBody, &errBody) + if err != nil { + return 
nil, fmt.Errorf(errUnmarshalErrorBody+": %w", err) + } + + errCodes := make([]int, 0, len(errBody.Errors)) + errMsgs := make([]string, 0, len(errBody.Errors)) + for _, e := range errBody.Errors { + errCodes = append(errCodes, e.Code) + errMsgs = append(errMsgs, e.Message) + } + + err := &Error{ + StatusCode: resp.StatusCode, + RayID: resp.Header.Get("cf-ray"), + Errors: errBody.Errors, + ErrorCodes: errCodes, + ErrorMessages: errMsgs, + Messages: errBody.Messages, + } + + switch resp.StatusCode { + case http.StatusUnauthorized: + err.Type = ErrorTypeAuthorization + return nil, &AuthorizationError{cloudflareError: err} + case http.StatusForbidden: + err.Type = ErrorTypeAuthentication + return nil, &AuthenticationError{cloudflareError: err} + case http.StatusNotFound: + err.Type = ErrorTypeNotFound + return nil, &NotFoundError{cloudflareError: err} + case http.StatusTooManyRequests: + err.Type = ErrorTypeRateLimit + return nil, &RatelimitError{cloudflareError: err} + default: + err.Type = ErrorTypeRequest + return nil, &RequestError{cloudflareError: err} + } + } + + return &APIResponse{ + Body: respBody, + StatusCode: resp.StatusCode, + Status: resp.Status, + Headers: resp.Header, + }, nil +} + +// request makes a HTTP request to the given API endpoint, returning the raw +// *http.Response, or an error if one occurred. The caller is responsible for +// closing the response body. +func (api *API) request(ctx context.Context, method, uri string, reqBody io.Reader, authType int, headers http.Header) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, api.BaseURL+uri, reqBody) + if err != nil { + return nil, fmt.Errorf("HTTP request creation failed: %w", err) + } + + combinedHeaders := make(http.Header) + copyHeader(combinedHeaders, api.headers) + copyHeader(combinedHeaders, headers) + req.Header = combinedHeaders + + if authType&AuthKeyEmail != 0 { + req.Header.Set("X-Auth-Key", api.APIKey) + req.Header.Set("X-Auth-Email", api.APIEmail) + } + if authType&AuthUserService != 0 { + req.Header.Set("X-Auth-User-Service-Key", api.APIUserServiceKey) + } + if authType&AuthToken != 0 { + req.Header.Set("Authorization", "Bearer "+api.APIToken) + } + + if api.UserAgent != "" { + req.Header.Set("User-Agent", api.UserAgent) + } + + if req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "application/json") + } + + if api.Debug { + dump, err := httputil.DumpRequestOut(req, true) + if err != nil { + return nil, err + } + + // Strip out any sensitive information from the request payload. + sensitiveKeys := []string{api.APIKey, api.APIEmail, api.APIToken, api.APIUserServiceKey} + for _, key := range sensitiveKeys { + if key != "" { + valueRegex := regexp.MustCompile(fmt.Sprintf("(?m)%s", key)) + dump = valueRegex.ReplaceAll(dump, []byte("[redacted]")) + } + } + log.Printf("\n%s", string(dump)) + } + + resp, err := api.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("HTTP request failed: %w", err) + } + + if api.Debug { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return resp, err + } + log.Printf("\n%s", string(dump)) + } + + return resp, nil +} + +// copyHeader copies all headers for `source` and sets them on `target`. +// based on https://godoc.org/github.com/golang/gddo/httputil/header#Copy +func copyHeader(target, source http.Header) { + for k, vs := range source { + target[k] = vs + } +} + +// ResponseInfo contains a code and message returned by the API as errors or +// informational messages inside the response. 
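+//
+// A sketch of how the envelope is typically consumed (editor's note; assumes
+// body holds a raw response body, and the logging is illustrative):
+//
+//	var r Response
+//	if err := json.Unmarshal(body, &r); err != nil {
+//		return err
+//	}
+//	for _, e := range r.Errors {
+//		log.Printf("API error %d: %s", e.Code, e.Message)
+//	}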
+type ResponseInfo struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// Response is a template. There will also be a result struct. There will be a +// unique response type for each response, which will include this type. +type Response struct { + Success bool `json:"success"` + Errors []ResponseInfo `json:"errors"` + Messages []ResponseInfo `json:"messages"` +} + +// ResultInfoCursors contains information about cursors. +type ResultInfoCursors struct { + Before string `json:"before" url:"before,omitempty"` + After string `json:"after" url:"after,omitempty"` +} + +// ResultInfo contains metadata about the Response. +type ResultInfo struct { + Page int `json:"page" url:"page,omitempty"` + PerPage int `json:"per_page" url:"per_page,omitempty"` + TotalPages int `json:"total_pages" url:"-"` + Count int `json:"count" url:"-"` + Total int `json:"total_count" url:"-"` + Cursor string `json:"cursor" url:"cursor,omitempty"` + Cursors ResultInfoCursors `json:"cursors" url:"cursors,omitempty"` +} + +// RawResponse keeps the result as JSON form. +type RawResponse struct { + Response + Result json.RawMessage `json:"result"` + ResultInfo *ResultInfo `json:"result_info,omitempty"` +} + +// Raw makes a HTTP request with user provided params and returns the +// result as a RawResponse, which contains the untouched JSON result. +func (api *API) Raw(ctx context.Context, method, endpoint string, data interface{}, headers http.Header) (RawResponse, error) { + var r RawResponse + res, err := api.makeRequestContextWithHeaders(ctx, method, endpoint, data, headers) + if err != nil { + return r, err + } + + if err := json.Unmarshal(res, &r); err != nil { + return r, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// PaginationOptions can be passed to a list request to configure paging +// These values will be defaulted if omitted, and PerPage has min/max limits set by resource. +type PaginationOptions struct { + Page int `json:"page,omitempty" url:"page,omitempty"` + PerPage int `json:"per_page,omitempty" url:"per_page,omitempty"` +} + +// RetryPolicy specifies number of retries and min/max retry delays +// This config is used when the client exponentially backs off after errored requests. +type RetryPolicy struct { + MaxRetries int + MinRetryDelay time.Duration + MaxRetryDelay time.Duration +} + +// Logger defines the interface this library needs to use logging +// This is a subset of the methods implemented in the log package. +type Logger interface { + Printf(format string, v ...interface{}) +} + +// ReqOption is a functional option for configuring API requests. +type ReqOption func(opt *reqOption) + +type reqOption struct { + params url.Values +} + +// WithZoneFilters applies a filter based on zone properties. +func WithZoneFilters(zoneName, accountID, status string) ReqOption { + return func(opt *reqOption) { + if zoneName != "" { + opt.params.Set("name", normalizeZoneName(zoneName)) + } + + if accountID != "" { + opt.params.Set("account.id", accountID) + } + + if status != "" { + opt.params.Set("status", status) + } + } +} + +// WithPagination configures the pagination for a response. +func WithPagination(opts PaginationOptions) ReqOption { + return func(opt *reqOption) { + if opts.Page > 0 { + opt.params.Set("page", strconv.Itoa(opts.Page)) + } + + if opts.PerPage > 0 { + opt.params.Set("per_page", strconv.Itoa(opts.PerPage)) + } + } +} + +// checkResultInfo checks whether ResultInfo is reasonable except that it currently +// ignores the cursor information. 
perPage, page, and count are the requested #items +// per page, the requested page number, and the actual length of the Result array. +// +// Responses from the actual Cloudflare servers should pass all these checks (or we +// discover a serious bug in the Cloudflare servers). However, the unit tests can +// easily violate these constraints and this utility function can help debugging. +// Correct pagination information is crucial for more advanced List* functions that +// handle pagination automatically and fetch different pages in parallel. +// +// TODO: check cursors as well. +func checkResultInfo(perPage, page, count int, info *ResultInfo) bool { + if info.Cursor != "" || info.Cursors.Before != "" || info.Cursors.After != "" { + panic("checkResultInfo could not handle cursors yet.") + } + + switch { + case info.PerPage != perPage || info.Page != page || info.Count != count: + return false + + case info.PerPage <= 0: + return false + + case info.Total == 0 && info.TotalPages == 0 && info.Page == 1 && info.Count == 0: + return true + + case info.Total <= 0 || info.TotalPages <= 0: + return false + + case info.Total > info.PerPage*info.TotalPages || info.Total <= info.PerPage*(info.TotalPages-1): + return false + } + + switch { + case info.Page > info.TotalPages || info.Page <= 0: + return false + + case info.Page < info.TotalPages: + return info.Count == info.PerPage + + case info.Page == info.TotalPages: + return info.Count == info.Total-info.PerPage*(info.TotalPages-1) + + default: + // This is actually impossible, but Go compiler does not know trichotomy + panic("checkResultInfo: impossible") + } +} + +type OrderDirection string + +const ( + OrderDirectionAsc OrderDirection = "asc" + OrderDirectionDesc OrderDirection = "desc" +) diff --git a/vendor/github.com/cloudflare/cloudflare-go/consts.go b/vendor/github.com/cloudflare/cloudflare-go/consts.go new file mode 100644 index 0000000..668ddc3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/consts.go @@ -0,0 +1,27 @@ +package cloudflare + +// RouteRoot represents the name of the route namespace. +type RouteRoot string + +const ( + defaultScheme = "https" + defaultHostname = "api.cloudflare.com" + defaultBasePath = "/client/v4" + userAgent = "cloudflare-go" + + // AccountRouteRoot is the accounts route namespace. + AccountRouteRoot RouteRoot = "accounts" + + // ZoneRouteRoot is the zones route namespace. + ZoneRouteRoot RouteRoot = "zones" + + originCARootCertEccURL = "https://developers.cloudflare.com/ssl/static/origin_ca_ecc_root.pem" + originCARootCertRsaURL = "https://developers.cloudflare.com/ssl/static/origin_ca_rsa_root.pem" + + // Used for testing. + testAccountID = "01a7362d577a6c3019a474fd6f485823" + testZoneID = "d56084adb405e0b7e32c52321bf07be6" + testUserID = "a81be4e9b20632860d20a64c054c4150" + testCertPackUUID = "a77f8bd7-3b47-46b4-a6f1-75cf98109948" + testTunnelID = "f174e90a-fafe-4643-bbbc-4a0ed4fc8415" +) diff --git a/vendor/github.com/cloudflare/cloudflare-go/convert_types.go b/vendor/github.com/cloudflare/cloudflare-go/convert_types.go new file mode 100644 index 0000000..f3ebc83 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/convert_types.go @@ -0,0 +1,932 @@ +// File contains helper methods for accepting variants (pointers, values, +// slices, etc) of a particular type and returning them in another. A common use +// is pointer to values and back. +// +// _Most_ follow the convention of (where is a Golang type such as Bool): +// +// Ptr: Accepts a value and returns a pointer. 
+// : Accepts a pointer and returns a value. +// PtrSlice: Accepts a slice of values and returns a slice of pointers. +// Slice: Accepts a slice of pointers and returns a slice of values. +// PtrMap: Accepts a string map of values into a string map of pointers. +// Map: Accepts a string map of pointers into a string map of values. +// +// Not all Golang types are covered here, only those that are commonly used. +package cloudflare + +import ( + "reflect" + "time" +) + +// AnyPtr is a helper routine that allocates a new interface value +// to store v and returns a pointer to it. +// +// Usage: var _ *Type = AnyPtr(Type(value) | value).(*Type) +// +// var _ *bool = AnyPtr(true).(*bool) +// var _ *byte = AnyPtr(byte(1)).(*byte) +// var _ *complex64 = AnyPtr(complex64(1.1)).(*complex64) +// var _ *complex128 = AnyPtr(complex128(1.1)).(*complex128) +// var _ *float32 = AnyPtr(float32(1.1)).(*float32) +// var _ *float64 = AnyPtr(float64(1.1)).(*float64) +// var _ *int = AnyPtr(int(1)).(*int) +// var _ *int8 = AnyPtr(int8(8)).(*int8) +// var _ *int16 = AnyPtr(int16(16)).(*int16) +// var _ *int32 = AnyPtr(int32(32)).(*int32) +// var _ *int64 = AnyPtr(int64(64)).(*int64) +// var _ *rune = AnyPtr(rune(1)).(*rune) +// var _ *string = AnyPtr("ptr").(*string) +// var _ *uint = AnyPtr(uint(1)).(*uint) +// var _ *uint8 = AnyPtr(uint8(8)).(*uint8) +// var _ *uint16 = AnyPtr(uint16(16)).(*uint16) +// var _ *uint32 = AnyPtr(uint32(32)).(*uint32) +// var _ *uint64 = AnyPtr(uint64(64)).(*uint64) +func AnyPtr(v interface{}) interface{} { + r := reflect.New(reflect.TypeOf(v)) + reflect.ValueOf(r.Interface()).Elem().Set(reflect.ValueOf(v)) + return r.Interface() +} + +// BytePtr is a helper routine that allocates a new byte value to store v and +// returns a pointer to it. +func BytePtr(v byte) *byte { return &v } + +// Complex64Ptr is a helper routine that allocates a new complex64 value to +// store v and returns a pointer to it. +func Complex64Ptr(v complex64) *complex64 { return &v } + +// Complex128Ptr is a helper routine that allocates a new complex128 value +// to store v and returns a pointer to it. +func Complex128Ptr(v complex128) *complex128 { return &v } + +// RunePtr is a helper routine that allocates a new rune value to store v +// and returns a pointer to it. +func RunePtr(v rune) *rune { return &v } + +// TimePtr is a helper routine that allocates a new time.Time value +// to store v and returns a pointer to it. +func TimePtr(v time.Time) *time.Time { return &v } + +// DurationPtr is a helper routine that allocates a new time.Duration value +// to store v and returns a pointer to it. +func DurationPtr(v time.Duration) *time.Duration { return &v } + +// BoolPtr is a helper routine that allocates a new bool value to store v and +// returns a pointer to it. +func BoolPtr(v bool) *bool { return &v } + +// Bool is a helper routine that accepts a bool pointer and returns a value +// to it. +func Bool(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolPtrSlice converts a slice of bool values into a slice of bool pointers. +func BoolPtrSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolSlice converts a slice of bool pointers into a slice of bool values. 
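+//
+// For example (editor's sketch of the Ptr/Slice conventions described in the
+// file header comment):
+//
+//	ptrs := BoolPtrSlice([]bool{true, false}) // []*bool
+//	vals := BoolSlice(ptrs)                   // []bool{true, false}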
+func BoolSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolPtrMap converts a string map of bool values into a string map of bool +// pointers. +func BoolPtrMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolMap converts a string map of bool pointers into a string map of bool +// values. +func BoolMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Byte is a helper routine that accepts a byte pointer and returns a +// value to it. +func Byte(v *byte) byte { + if v != nil { + return *v + } + return byte(0) +} + +// Complex64 is a helper routine that accepts a complex64 pointer and +// returns a value to it. +func Complex64(v *complex64) complex64 { + if v != nil { + return *v + } + return 0 +} + +// Complex128 is a helper routine that accepts a complex128 pointer and +// returns a value to it. +func Complex128(v *complex128) complex128 { + if v != nil { + return *v + } + return 0 +} + +// Float32Ptr is a helper routine that allocates a new float32 value to store v +// and returns a pointer to it. +func Float32Ptr(v float32) *float32 { return &v } + +// Float32 is a helper routine that accepts a float32 pointer and returns a +// value to it. +func Float32(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32PtrSlice converts a slice of float32 values into a slice of float32 +// pointers. +func Float32PtrSlice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32Slice converts a slice of float32 pointers into a slice of +// float32 values. +func Float32Slice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32PtrMap converts a string map of float32 values into a string map of +// float32 pointers. +func Float32PtrMap(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32Map converts a string map of float32 pointers into a string +// map of float32 values. +func Float32Map(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64Ptr is a helper routine that allocates a new float64 value to store v +// and returns a pointer to it. +func Float64Ptr(v float64) *float64 { return &v } + +// Float64 is a helper routine that accepts a float64 pointer and returns a +// value to it. +func Float64(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64PtrSlice converts a slice of float64 values into a slice of float64 +// pointers. +func Float64PtrSlice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64Slice converts a slice of float64 pointers into a slice of +// float64 values. 
+func Float64Slice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64PtrMap converts a string map of float64 values into a string map of +// float64 pointers. +func Float64PtrMap(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64Map converts a string map of float64 pointers into a string +// map of float64 values. +func Float64Map(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// IntPtr is a helper routine that allocates a new int value to store v and +// returns a pointer to it. +func IntPtr(v int) *int { return &v } + +// Int is a helper routine that accepts a int pointer and returns a value +// to it. +func Int(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntPtrSlice converts a slice of int values into a slice of int pointers. +func IntPtrSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntSlice converts a slice of int pointers into a slice of int values. +func IntSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntPtrMap converts a string map of int values into a string map of int +// pointers. +func IntPtrMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntMap converts a string map of int pointers into a string map of int +// values. +func IntMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8Ptr is a helper routine that allocates a new int8 value to store v and +// returns a pointer to it. +func Int8Ptr(v int8) *int8 { return &v } + +// Int8 is a helper routine that accepts a int8 pointer and returns a value +// to it. +func Int8(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8PtrSlice converts a slice of int8 values into a slice of int8 pointers. +func Int8PtrSlice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8Slice converts a slice of int8 pointers into a slice of int8 values. +func Int8Slice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8PtrMap converts a string map of int8 values into a string map of int8 +// pointers. +func Int8PtrMap(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8Map converts a string map of int8 pointers into a string map of int8 +// values. +func Int8Map(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16Ptr is a helper routine that allocates a new int16 value to store v +// and returns a pointer to it. 
+func Int16Ptr(v int16) *int16 { return &v } + +// Int16 is a helper routine that accepts a int16 pointer and returns a +// value to it. +func Int16(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16PtrSlice converts a slice of int16 values into a slice of int16 +// pointers. +func Int16PtrSlice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16Slice converts a slice of int16 pointers into a slice of int16 +// values. +func Int16Slice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16PtrMap converts a string map of int16 values into a string map of int16 +// pointers. +func Int16PtrMap(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16Map converts a string map of int16 pointers into a string map of +// int16 values. +func Int16Map(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32Ptr is a helper routine that allocates a new int32 value to store v +// and returns a pointer to it. +func Int32Ptr(v int32) *int32 { return &v } + +// Int32 is a helper routine that accepts a int32 pointer and returns a +// value to it. +func Int32(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32PtrSlice converts a slice of int32 values into a slice of int32 +// pointers. +func Int32PtrSlice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32Slice converts a slice of int32 pointers into a slice of int32 +// values. +func Int32Slice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32PtrMap converts a string map of int32 values into a string map of int32 +// pointers. +func Int32PtrMap(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32Map converts a string map of int32 pointers into a string map of +// int32 values. +func Int32Map(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64Ptr is a helper routine that allocates a new int64 value to store v +// and returns a pointer to it. +func Int64Ptr(v int64) *int64 { return &v } + +// Int64 is a helper routine that accepts a int64 pointer and returns a +// value to it. +func Int64(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64PtrSlice converts a slice of int64 values into a slice of int64 +// pointers. +func Int64PtrSlice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64Slice converts a slice of int64 pointers into a slice of int64 +// values. +func Int64Slice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64PtrMap converts a string map of int64 values into a string map of int64 +// pointers. 
+func Int64PtrMap(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64Map converts a string map of int64 pointers into a string map of +// int64 values. +func Int64Map(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Rune is a helper routine that accepts a rune pointer and returns a value +// to it. +func Rune(v *rune) rune { + if v != nil { + return *v + } + return rune(0) +} + +// StringPtr is a helper routine that allocates a new string value to store v +// and returns a pointer to it. +func StringPtr(v string) *string { return &v } + +// String is a helper routine that accepts a string pointer and returns a +// value to it. +func String(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringPtrSlice converts a slice of string values into a slice of string +// pointers. +func StringPtrSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringSlice converts a slice of string pointers into a slice of string +// values. +func StringSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringPtrMap converts a string map of string values into a string map of +// string pointers. +func StringPtrMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringMap converts a string map of string pointers into a string map of +// string values. +func StringMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// UintPtr is a helper routine that allocates a new uint value to store v +// and returns a pointer to it. +func UintPtr(v uint) *uint { return &v } + +// Uint is a helper routine that accepts a uint pointer and returns a value +// to it. +func Uint(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintPtrSlice converts a slice of uint values uinto a slice of uint pointers. +func UintPtrSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintSlice converts a slice of uint pointers uinto a slice of uint +// values. +func UintSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintPtrMap converts a string map of uint values uinto a string map of uint +// pointers. +func UintPtrMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintMap converts a string map of uint pointers uinto a string map of +// uint values. +func UintMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8Ptr is a helper routine that allocates a new uint8 value to store v +// and returns a pointer to it. 
+func Uint8Ptr(v uint8) *uint8 { return &v } + +// Uint8 is a helper routine that accepts a uint8 pointer and returns a +// value to it. +func Uint8(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8PtrSlice converts a slice of uint8 values into a slice of uint8 +// pointers. +func Uint8PtrSlice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8Slice converts a slice of uint8 pointers into a slice of uint8 +// values. +func Uint8Slice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8PtrMap converts a string map of uint8 values into a string map of uint8 +// pointers. +func Uint8PtrMap(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8Map converts a string map of uint8 pointers into a string +// map of uint8 values. +func Uint8Map(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16Ptr is a helper routine that allocates a new uint16 value to store v +// and returns a pointer to it. +func Uint16Ptr(v uint16) *uint16 { return &v } + +// Uint16 is a helper routine that accepts a uint16 pointer and returns a +// value to it. +func Uint16(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16PtrSlice converts a slice of uint16 values into a slice of uint16 +// pointers. +func Uint16PtrSlice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16Slice converts a slice of uint16 pointers into a slice of uint16 +// values. +func Uint16Slice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16PtrMap converts a string map of uint16 values into a string map of +// uint16 pointers. +func Uint16PtrMap(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16Map converts a string map of uint16 pointers into a string map of +// uint16 values. +func Uint16Map(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32Ptr is a helper routine that allocates a new uint32 value to store v +// and returns a pointer to it. +func Uint32Ptr(v uint32) *uint32 { return &v } + +// Uint32 is a helper routine that accepts a uint32 pointer and returns a +// value to it. +func Uint32(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32PtrSlice converts a slice of uint32 values into a slice of uint32 +// pointers. +func Uint32PtrSlice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32Slice converts a slice of uint32 pointers into a slice of uint32 +// values. 
+func Uint32Slice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32PtrMap converts a string map of uint32 values into a string map of +// uint32 pointers. +func Uint32PtrMap(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32Map converts a string map of uint32 pointers into a string +// map of uint32 values. +func Uint32Map(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64Ptr is a helper routine that allocates a new uint64 value to store v +// and returns a pointer to it. +func Uint64Ptr(v uint64) *uint64 { return &v } + +// Uint64 is a helper routine that accepts a uint64 pointer and returns a +// value to it. +func Uint64(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64PtrSlice converts a slice of uint64 values into a slice of uint64 +// pointers. +func Uint64PtrSlice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64Slice converts a slice of uint64 pointers into a slice of uint64 +// values. +func Uint64Slice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64PtrMap converts a string map of uint64 values into a string map of +// uint64 pointers. +func Uint64PtrMap(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64Map converts a string map of uint64 pointers into a string map of +// uint64 values. +func Uint64Map(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time is a helper routine that accepts a time pointer value and returns a +// value to it. +func Time(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// Duration is a helper routine that accepts a time pointer ion value +// and returns a value to it. +// func Duration(v *time.Duration) time.Duration { +// if v != nil { +// return *v +// } +// return time.Duration(0) +// } diff --git a/vendor/github.com/cloudflare/cloudflare-go/custom_hostname.go b/vendor/github.com/cloudflare/cloudflare-go/custom_hostname.go new file mode 100644 index 0000000..b625b72 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/custom_hostname.go @@ -0,0 +1,330 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/goccy/go-json" +) + +// CustomHostnameStatus is the enumeration of valid state values in the CustomHostnameSSL. +type CustomHostnameStatus string + +const ( + // PENDING status represents state of CustomHostname is pending. + PENDING CustomHostnameStatus = "pending" + // ACTIVE status represents state of CustomHostname is active. + ACTIVE CustomHostnameStatus = "active" + // MOVED status represents state of CustomHostname is moved. + MOVED CustomHostnameStatus = "moved" + // DELETED status represents state of CustomHostname is deleted. 
+ DELETED CustomHostnameStatus = "deleted" + // BLOCKED status represents state of CustomHostname is blocked from going active. + BLOCKED CustomHostnameStatus = "blocked" +) + +// CustomHostnameSSLSettings represents the SSL settings for a custom hostname. +type CustomHostnameSSLSettings struct { + HTTP2 string `json:"http2,omitempty"` + HTTP3 string `json:"http3,omitempty"` + TLS13 string `json:"tls_1_3,omitempty"` + MinTLSVersion string `json:"min_tls_version,omitempty"` + Ciphers []string `json:"ciphers,omitempty"` + EarlyHints string `json:"early_hints,omitempty"` +} + +// CustomHostnameOwnershipVerification represents ownership verification status of a given custom hostname. +type CustomHostnameOwnershipVerification struct { + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` +} + +// SSLValidationError represents errors that occurred during SSL validation. +type SSLValidationError struct { + Message string `json:"message,omitempty"` +} + +// CustomHostnameSSLCertificates represent certificate properties like issuer, expires date and etc. +type CustomHostnameSSLCertificates struct { + Issuer string `json:"issuer"` + SerialNumber string `json:"serial_number"` + Signature string `json:"signature"` + ExpiresOn *time.Time `json:"expires_on"` + IssuedOn *time.Time `json:"issued_on"` + FingerprintSha256 string `json:"fingerprint_sha256"` + ID string `json:"id"` +} + +// CustomHostnameSSL represents the SSL section in a given custom hostname. +type CustomHostnameSSL struct { + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + Method string `json:"method,omitempty"` + Type string `json:"type,omitempty"` + Wildcard *bool `json:"wildcard,omitempty"` + CustomCertificate string `json:"custom_certificate,omitempty"` + CustomKey string `json:"custom_key,omitempty"` + CertificateAuthority string `json:"certificate_authority,omitempty"` + Issuer string `json:"issuer,omitempty"` + SerialNumber string `json:"serial_number,omitempty"` + Settings CustomHostnameSSLSettings `json:"settings,omitempty"` + Certificates []CustomHostnameSSLCertificates `json:"certificates,omitempty"` + // Deprecated: use ValidationRecords. + // If there a single validation record, this will equal ValidationRecords[0] for backwards compatibility. + SSLValidationRecord + ValidationRecords []SSLValidationRecord `json:"validation_records,omitempty"` + ValidationErrors []SSLValidationError `json:"validation_errors,omitempty"` + BundleMethod string `json:"bundle_method,omitempty"` +} + +// CustomMetadata defines custom metadata for the hostname. This requires logic to be implemented by Cloudflare to act on the data provided. +type CustomMetadata map[string]interface{} + +// CustomHostname represents a custom hostname in a zone. 
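+//
+// A minimal value for creating a hostname might look like (editor's sketch;
+// the SSL method and type values are illustrative):
+//
+//	ch := CustomHostname{
+//		Hostname: "shop.example.com",
+//		SSL: &CustomHostnameSSL{
+//			Method: "http",
+//			Type:   "dv",
+//		},
+//	}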
+type CustomHostname struct { + ID string `json:"id,omitempty"` + Hostname string `json:"hostname,omitempty"` + CustomOriginServer string `json:"custom_origin_server,omitempty"` + CustomOriginSNI string `json:"custom_origin_sni,omitempty"` + SSL *CustomHostnameSSL `json:"ssl,omitempty"` + CustomMetadata *CustomMetadata `json:"custom_metadata,omitempty"` + Status CustomHostnameStatus `json:"status,omitempty"` + VerificationErrors []string `json:"verification_errors,omitempty"` + OwnershipVerification CustomHostnameOwnershipVerification `json:"ownership_verification,omitempty"` + OwnershipVerificationHTTP CustomHostnameOwnershipVerificationHTTP `json:"ownership_verification_http,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` +} + +// CustomHostnameOwnershipVerificationHTTP represents a response from the Custom Hostnames endpoints. +type CustomHostnameOwnershipVerificationHTTP struct { + HTTPUrl string `json:"http_url,omitempty"` + HTTPBody string `json:"http_body,omitempty"` +} + +// CustomHostnameResponse represents a response from the Custom Hostnames endpoints. +type CustomHostnameResponse struct { + Result CustomHostname `json:"result"` + Response +} + +// CustomHostnameListResponse represents a response from the Custom Hostnames endpoints. +type CustomHostnameListResponse struct { + Result []CustomHostname `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// CustomHostnameFallbackOrigin represents a Custom Hostnames Fallback Origin. +type CustomHostnameFallbackOrigin struct { + Origin string `json:"origin,omitempty"` + Status string `json:"status,omitempty"` + Errors []string `json:"errors,omitempty"` +} + +// CustomHostnameFallbackOriginResponse represents a response from the Custom Hostnames Fallback Origin endpoint. +type CustomHostnameFallbackOriginResponse struct { + Result CustomHostnameFallbackOrigin `json:"result"` + Response +} + +// UpdateCustomHostnameSSL modifies SSL configuration for the given custom +// hostname in the given zone. +// +// API reference: https://api.cloudflare.com/#custom-hostname-for-a-zone-update-custom-hostname-configuration +func (api *API) UpdateCustomHostnameSSL(ctx context.Context, zoneID string, customHostnameID string, ssl *CustomHostnameSSL) (*CustomHostnameResponse, error) { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/%s", zoneID, customHostnameID) + ch := CustomHostname{ + SSL: ssl, + } + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, ch) + if err != nil { + return nil, err + } + + var response *CustomHostnameResponse + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response, nil +} + +// UpdateCustomHostname modifies configuration for the given custom +// hostname in the given zone. +// +// API reference: https://api.cloudflare.com/#custom-hostname-for-a-zone-update-custom-hostname-configuration +func (api *API) UpdateCustomHostname(ctx context.Context, zoneID string, customHostnameID string, ch CustomHostname) (*CustomHostnameResponse, error) { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/%s", zoneID, customHostnameID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, ch) + if err != nil { + return nil, err + } + + var response *CustomHostnameResponse + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response, nil +} + +// DeleteCustomHostname deletes a custom hostname (and any issued SSL +// certificates). 
+// +// API reference: https://api.cloudflare.com/#custom-hostname-for-a-zone-delete-a-custom-hostname-and-any-issued-ssl-certificates- +func (api *API) DeleteCustomHostname(ctx context.Context, zoneID string, customHostnameID string) error { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/%s", zoneID, customHostnameID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var response *CustomHostnameResponse + err = json.Unmarshal(res, &response) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// CreateCustomHostname creates a new custom hostname and requests that an SSL certificate be issued for it. +// +// API reference: https://api.cloudflare.com/#custom-hostname-for-a-zone-create-custom-hostname +func (api *API) CreateCustomHostname(ctx context.Context, zoneID string, ch CustomHostname) (*CustomHostnameResponse, error) { + uri := fmt.Sprintf("/zones/%s/custom_hostnames", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, ch) + if err != nil { + return nil, err + } + + var response *CustomHostnameResponse + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// CustomHostnames fetches custom hostnames for the given zone, +// by applying filter.Hostname if not empty and scoping the result to page'th 50 items. +// +// The returned ResultInfo can be used to implement pagination. +// +// API reference: https://api.cloudflare.com/#custom-hostname-for-a-zone-list-custom-hostnames +func (api *API) CustomHostnames(ctx context.Context, zoneID string, page int, filter CustomHostname) ([]CustomHostname, ResultInfo, error) { + v := url.Values{} + v.Set("per_page", "50") + v.Set("page", strconv.Itoa(page)) + if filter.Hostname != "" { + v.Set("hostname", filter.Hostname) + } + + uri := fmt.Sprintf("/zones/%s/custom_hostnames?%s", zoneID, v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []CustomHostname{}, ResultInfo{}, err + } + var customHostnameListResponse CustomHostnameListResponse + err = json.Unmarshal(res, &customHostnameListResponse) + if err != nil { + return []CustomHostname{}, ResultInfo{}, err + } + + return customHostnameListResponse.Result, customHostnameListResponse.ResultInfo, nil +} + +// CustomHostname inspects the given custom hostname in the given zone. +// +// API reference: https://api.cloudflare.com/#custom-hostname-for-a-zone-custom-hostname-configuration-details +func (api *API) CustomHostname(ctx context.Context, zoneID string, customHostnameID string) (CustomHostname, error) { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/%s", zoneID, customHostnameID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return CustomHostname{}, err + } + + var response CustomHostnameResponse + err = json.Unmarshal(res, &response) + if err != nil { + return CustomHostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// CustomHostnameIDByName retrieves the ID for the given hostname in the given zone. 
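+//
+// Usage sketch (editor's note; assumes ctx, zoneID and an *API client are in
+// scope, and the hostname is illustrative):
+//
+//	id, err := api.CustomHostnameIDByName(ctx, zoneID, "shop.example.com")
+//	if err != nil {
+//		return err
+//	}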
+func (api *API) CustomHostnameIDByName(ctx context.Context, zoneID string, hostname string) (string, error) { + customHostnames, _, err := api.CustomHostnames(ctx, zoneID, 1, CustomHostname{Hostname: hostname}) + if err != nil { + return "", fmt.Errorf("CustomHostnames command failed: %w", err) + } + for _, ch := range customHostnames { + if ch.Hostname == hostname { + return ch.ID, nil + } + } + return "", errors.New("CustomHostname could not be found") +} + +// UpdateCustomHostnameFallbackOrigin modifies the Custom Hostname Fallback origin in the given zone. +// +// API reference: https://api.cloudflare.com/#custom-hostname-fallback-origin-for-a-zone-update-fallback-origin-for-custom-hostnames +func (api *API) UpdateCustomHostnameFallbackOrigin(ctx context.Context, zoneID string, chfo CustomHostnameFallbackOrigin) (*CustomHostnameFallbackOriginResponse, error) { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/fallback_origin", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, chfo) + if err != nil { + return nil, err + } + + var response *CustomHostnameFallbackOriginResponse + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response, nil +} + +// DeleteCustomHostnameFallbackOrigin deletes the Custom Hostname Fallback origin in the given zone. +// +// API reference: https://api.cloudflare.com/#custom-hostname-fallback-origin-for-a-zone-delete-fallback-origin-for-custom-hostnames +func (api *API) DeleteCustomHostnameFallbackOrigin(ctx context.Context, zoneID string) error { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/fallback_origin", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var response *CustomHostnameFallbackOriginResponse + err = json.Unmarshal(res, &response) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// CustomHostnameFallbackOrigin inspects the Custom Hostname Fallback origin in the given zone. 
+// +// API reference: https://api.cloudflare.com/#custom-hostname-fallback-origin-for-a-zone-properties +func (api *API) CustomHostnameFallbackOrigin(ctx context.Context, zoneID string) (CustomHostnameFallbackOrigin, error) { + uri := fmt.Sprintf("/zones/%s/custom_hostnames/fallback_origin", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return CustomHostnameFallbackOrigin{}, err + } + + var response CustomHostnameFallbackOriginResponse + err = json.Unmarshal(res, &response) + if err != nil { + return CustomHostnameFallbackOrigin{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/custom_nameservers.go b/vendor/github.com/cloudflare/cloudflare-go/custom_nameservers.go new file mode 100644 index 0000000..dda3003 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/custom_nameservers.go @@ -0,0 +1,207 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type CustomNameserverRecord struct { + Type string `json:"type"` + Value string `json:"value"` +} + +type CustomNameserver struct { + NSName string `json:"ns_name"` + NSSet int `json:"ns_set"` +} + +type CustomNameserverResult struct { + DNSRecords []CustomNameserverRecord `json:"dns_records"` + NSName string `json:"ns_name"` + NSSet int `json:"ns_set"` + Status string `json:"status"` + ZoneTag string `json:"zone_tag"` +} + +type CustomNameserverZoneMetadata struct { + NSSet int `json:"ns_set"` + Enabled bool `json:"enabled"` +} + +type customNameserverListResponse struct { + Response + Result []CustomNameserverResult `json:"result"` +} + +type customNameserverCreateResponse struct { + Response + Result CustomNameserverResult `json:"result"` +} + +type getEligibleZonesAccountCustomNameserversResponse struct { + Result []string `json:"result"` +} + +type customNameserverZoneMetadata struct { + Response + Result CustomNameserverZoneMetadata +} + +type GetCustomNameserversParams struct{} + +type CreateCustomNameserversParams struct { + NSName string `json:"ns_name"` + NSSet int `json:"ns_set"` +} + +type DeleteCustomNameserversParams struct { + NSName string +} + +type GetEligibleZonesAccountCustomNameserversParams struct{} + +type GetCustomNameserverZoneMetadataParams struct{} + +type UpdateCustomNameserverZoneMetadataParams struct { + NSSet int `json:"ns_set"` + Enabled bool `json:"enabled"` +} + +// GetCustomNameservers lists custom nameservers. +// +// API documentation: https://developers.cloudflare.com/api/operations/account-level-custom-nameservers-list-account-custom-nameservers +func (api *API) GetCustomNameservers(ctx context.Context, rc *ResourceContainer, params GetCustomNameserversParams) ([]CustomNameserverResult, error) { + if rc.Level != AccountRouteLevel { + return []CustomNameserverResult{}, ErrRequiredAccountLevelResourceContainer + } + uri := fmt.Sprintf("/%s/%s/custom_ns", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var response customNameserverListResponse + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// CreateCustomNameservers adds a custom nameserver. 
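+//
+// Usage sketch (editor's note; assumes the AccountIdentifier helper from
+// resource.go, and the nameserver name is illustrative):
+//
+//	ns, err := api.CreateCustomNameservers(ctx, AccountIdentifier(accountID),
+//		CreateCustomNameserversParams{NSName: "ns1.example.com", NSSet: 1})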
+// +// API documentation: https://developers.cloudflare.com/api/operations/account-level-custom-nameservers-add-account-custom-nameserver +func (api *API) CreateCustomNameservers(ctx context.Context, rc *ResourceContainer, params CreateCustomNameserversParams) (CustomNameserverResult, error) { + if rc.Level != AccountRouteLevel { + return CustomNameserverResult{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/custom_ns", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return CustomNameserverResult{}, err + } + + response := &customNameserverCreateResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return CustomNameserverResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// DeleteCustomNameservers removes a custom nameserver. +// +// API documentation: https://developers.cloudflare.com/api/operations/account-level-custom-nameservers-delete-account-custom-nameserver +func (api *API) DeleteCustomNameservers(ctx context.Context, rc *ResourceContainer, params DeleteCustomNameserversParams) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + if params.NSName == "" { + return errors.New("missing required NSName parameter") + } + + uri := fmt.Sprintf("/%s/%s/custom_ns/%s", rc.Level, rc.Identifier, params.NSName) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// GetEligibleZonesAccountCustomNameservers lists zones eligible for custom nameservers. +// +// API documentation: https://developers.cloudflare.com/api/operations/account-level-custom-nameservers-get-eligible-zones-for-account-custom-nameservers +func (api *API) GetEligibleZonesAccountCustomNameservers(ctx context.Context, rc *ResourceContainer, params GetEligibleZonesAccountCustomNameserversParams) ([]string, error) { + if rc.Level != AccountRouteLevel { + return []string{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/custom_ns/availability", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var response getEligibleZonesAccountCustomNameserversResponse + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// GetCustomNameserverZoneMetadata get metadata for custom nameservers on a zone. 
+// +// API documentation: https://developers.cloudflare.com/api/operations/account-level-custom-nameservers-usage-for-a-zone-get-account-custom-nameserver-related-zone-metadata +func (api *API) GetCustomNameserverZoneMetadata(ctx context.Context, rc *ResourceContainer, params GetCustomNameserverZoneMetadataParams) (CustomNameserverZoneMetadata, error) { + if rc.Level != ZoneRouteLevel { + return CustomNameserverZoneMetadata{}, ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/custom_ns", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return CustomNameserverZoneMetadata{}, err + } + + var response customNameserverZoneMetadata + err = json.Unmarshal(res, &response) + if err != nil { + return CustomNameserverZoneMetadata{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// UpdateCustomNameserverZoneMetadata set metadata for custom nameservers on a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/account-level-custom-nameservers-usage-for-a-zone-set-account-custom-nameserver-related-zone-metadata +func (api *API) UpdateCustomNameserverZoneMetadata(ctx context.Context, rc *ResourceContainer, params UpdateCustomNameserverZoneMetadataParams) error { + if rc.Level != ZoneRouteLevel { + return ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/custom_ns", rc.Level, rc.Identifier) + + _, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/custom_pages.go b/vendor/github.com/cloudflare/cloudflare-go/custom_pages.go new file mode 100644 index 0000000..ae68d68 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/custom_pages.go @@ -0,0 +1,177 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// CustomPage represents a custom page configuration. +type CustomPage struct { + CreatedOn time.Time `json:"created_on"` + ModifiedOn time.Time `json:"modified_on"` + URL interface{} `json:"url"` + State string `json:"state"` + RequiredTokens []string `json:"required_tokens"` + PreviewTarget string `json:"preview_target"` + Description string `json:"description"` + ID string `json:"id"` +} + +// CustomPageResponse represents the response from the custom pages endpoint. +type CustomPageResponse struct { + Response + Result []CustomPage `json:"result"` +} + +// CustomPageDetailResponse represents the response from the custom page endpoint. +type CustomPageDetailResponse struct { + Response + Result CustomPage `json:"result"` +} + +// CustomPageOptions is used to determine whether or not the operation +// should take place on an account or zone level based on which is +// provided to the function. +// +// A non-empty value denotes desired use. +type CustomPageOptions struct { + AccountID string + ZoneID string +} + +// CustomPageParameters is used to update a particular custom page with +// the values provided. +type CustomPageParameters struct { + URL interface{} `json:"url"` + State string `json:"state"` +} + +// CustomPages lists custom pages for a zone or account. 
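+//
+// Usage sketch for the zone-scoped case (editor's note; assumes ctx and zoneID
+// are in scope):
+//
+//	pages, err := api.CustomPages(ctx, &CustomPageOptions{ZoneID: zoneID})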
+// +// Zone API reference: https://api.cloudflare.com/#custom-pages-for-a-zone-list-available-custom-pages +// Account API reference: https://api.cloudflare.com/#custom-pages-account--list-custom-pages +func (api *API) CustomPages(ctx context.Context, options *CustomPageOptions) ([]CustomPage, error) { + var ( + pageType, identifier string + ) + + if options.AccountID == "" && options.ZoneID == "" { + return nil, ErrAccountIDOrZoneIDAreRequired + } + + if options.AccountID != "" && options.ZoneID != "" { + return nil, ErrAccountIDAndZoneIDAreMutuallyExclusive + } + + // Should the account ID be defined, treat this as an account level operation. + if options.AccountID != "" { + pageType = "accounts" + identifier = options.AccountID + } else { + pageType = "zones" + identifier = options.ZoneID + } + + uri := fmt.Sprintf("/%s/%s/custom_pages", pageType, identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var customPageResponse CustomPageResponse + err = json.Unmarshal(res, &customPageResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return customPageResponse.Result, nil +} + +// CustomPage lists a single custom page based on the ID. +// +// Zone API reference: https://api.cloudflare.com/#custom-pages-for-a-zone-custom-page-details +// Account API reference: https://api.cloudflare.com/#custom-pages-account--custom-page-details +func (api *API) CustomPage(ctx context.Context, options *CustomPageOptions, customPageID string) (CustomPage, error) { + var ( + pageType, identifier string + ) + + if options.AccountID == "" && options.ZoneID == "" { + return CustomPage{}, ErrAccountIDOrZoneIDAreRequired + } + + if options.AccountID != "" && options.ZoneID != "" { + return CustomPage{}, ErrAccountIDAndZoneIDAreMutuallyExclusive + } + + // Should the account ID be defined, treat this as an account level operation. + if options.AccountID != "" { + pageType = "accounts" + identifier = options.AccountID + } else { + pageType = "zones" + identifier = options.ZoneID + } + + uri := fmt.Sprintf("/%s/%s/custom_pages/%s", pageType, identifier, customPageID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return CustomPage{}, err + } + + var customPageResponse CustomPageDetailResponse + err = json.Unmarshal(res, &customPageResponse) + if err != nil { + return CustomPage{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return customPageResponse.Result, nil +} + +// UpdateCustomPage updates a single custom page setting. +// +// Zone API reference: https://api.cloudflare.com/#custom-pages-for-a-zone-update-custom-page-url +// Account API reference: https://api.cloudflare.com/#custom-pages-account--update-custom-page +func (api *API) UpdateCustomPage(ctx context.Context, options *CustomPageOptions, customPageID string, pageParameters CustomPageParameters) (CustomPage, error) { + var ( + pageType, identifier string + ) + + if options.AccountID == "" && options.ZoneID == "" { + return CustomPage{}, ErrAccountIDOrZoneIDAreRequired + } + + if options.AccountID != "" && options.ZoneID != "" { + return CustomPage{}, ErrAccountIDAndZoneIDAreMutuallyExclusive + } + + // Should the account ID be defined, treat this as an account level operation. 
+ if options.AccountID != "" { + pageType = "accounts" + identifier = options.AccountID + } else { + pageType = "zones" + identifier = options.ZoneID + } + + uri := fmt.Sprintf("/%s/%s/custom_pages/%s", pageType, identifier, customPageID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, pageParameters) + if err != nil { + return CustomPage{}, err + } + + var customPageResponse CustomPageDetailResponse + err = json.Unmarshal(res, &customPageResponse) + if err != nil { + return CustomPage{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return customPageResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/d1.go b/vendor/github.com/cloudflare/cloudflare-go/d1.go new file mode 100644 index 0000000..08a3ecc --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/d1.go @@ -0,0 +1,199 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingDatabaseID = fmt.Errorf("required missing database ID") +) + +type D1Database struct { + Name string `json:"name"` + NumTables int `json:"num_tables"` + UUID string `json:"uuid"` + Version string `json:"version"` + CreatedAt *time.Time `json:"created_at"` + FileSize int64 `json:"file_size"` +} + +type ListD1DatabasesParams struct { + Name string `url:"name,omitempty"` + ResultInfo +} + +type ListD1Response struct { + Result []D1Database `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type CreateD1DatabaseParams struct { + Name string `json:"name"` +} + +type D1DatabaseResponse struct { + Result D1Database `json:"result"` + Response +} + +type QueryD1DatabaseParams struct { + DatabaseID string `json:"-"` + SQL string `json:"sql"` + Parameters []string `json:"params"` +} + +type D1DatabaseMetadata struct { + ChangedDB *bool `json:"changed_db,omitempty"` + Changes int `json:"changes"` + Duration float64 `json:"duration"` + LastRowID int `json:"last_row_id"` + RowsRead int `json:"rows_read"` + RowsWritten int `json:"rows_written"` + SizeAfter int `json:"size_after"` +} + +type D1Result struct { + Success *bool `json:"success"` + Results []map[string]any `json:"results"` + Meta D1DatabaseMetadata `json:"meta"` +} + +type QueryD1Response struct { + Result []D1Result `json:"result"` + Response +} + +// ListD1Databases returns all databases for an account. +// +// API reference: https://developers.cloudflare.com/api/operations/cloudflare-d1-list-databases +func (api *API) ListD1Databases(ctx context.Context, rc *ResourceContainer, params ListD1DatabasesParams) ([]D1Database, *ResultInfo, error) { + if rc.Identifier == "" { + return []D1Database{}, &ResultInfo{}, ErrMissingAccountID + } + baseURL := fmt.Sprintf("/accounts/%s/d1/database", rc.Identifier) + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 100 + } + + if params.Page < 1 { + params.Page = 1 + } + var databases []D1Database + var r ListD1Response + for { + uri := buildURI(baseURL, params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []D1Database{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + err = json.Unmarshal(res, &r) + if err != nil { + return []D1Database{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + databases = append(databases, r.Result...) 
+ params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + return databases, &r.ResultInfo, nil +} + +// CreateD1Database creates a new database for an account. +// +// API reference: https://developers.cloudflare.com/api/operations/cloudflare-d1-create-database +func (api *API) CreateD1Database(ctx context.Context, rc *ResourceContainer, params CreateD1DatabaseParams) (D1Database, error) { + if rc.Identifier == "" { + return D1Database{}, ErrMissingAccountID + } + uri := fmt.Sprintf("/accounts/%s/d1/database", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return D1Database{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r D1DatabaseResponse + err = json.Unmarshal(res, &r) + if err != nil { + return D1Database{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteD1Database deletes a database for an account. +// +// API reference: https://developers.cloudflare.com/api/operations/cloudflare-d1-delete-database +func (api *API) DeleteD1Database(ctx context.Context, rc *ResourceContainer, databaseID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + if databaseID == "" { + return ErrMissingDatabaseID + } + uri := fmt.Sprintf("/accounts/%s/d1/database/%s", rc.Identifier, databaseID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + return nil +} + +// GetD1Database returns a database for an account. +// +// API reference: https://developers.cloudflare.com/api/operations/cloudflare-d1-get-database +func (api *API) GetD1Database(ctx context.Context, rc *ResourceContainer, databaseID string) (D1Database, error) { + if rc.Identifier == "" { + return D1Database{}, ErrMissingAccountID + } + uri := fmt.Sprintf("/accounts/%s/d1/database/%s", rc.Identifier, databaseID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return D1Database{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r D1DatabaseResponse + err = json.Unmarshal(res, &r) + if err != nil { + return D1Database{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// QueryD1Database queries a database for an account. 
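[Editorial note, not part of the patch] To see the D1 helpers together, the fragment below (same assumptions as the earlier sketches) creates a database and runs a parameterised query against it via the QueryD1Database helper that follows; the database name and SQL are placeholders.

func createAndQueryD1(ctx context.Context, api *cloudflare.API, accountID string) error {
	rc := cloudflare.AccountIdentifier(accountID)

	db, err := api.CreateD1Database(ctx, rc, cloudflare.CreateD1DatabaseParams{Name: "example-db"})
	if err != nil {
		return err
	}

	// QueryD1Database posts the SQL plus positional parameters to the /query endpoint.
	results, err := api.QueryD1Database(ctx, rc, cloudflare.QueryD1DatabaseParams{
		DatabaseID: db.UUID,
		SQL:        "SELECT ? AS greeting;",
		Parameters: []string{"hello"},
	})
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Println(r.Results, r.Meta.Duration)
	}
	return nil
}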
+// +// API reference: https://developers.cloudflare.com/api/operations/cloudflare-d1-query-database +func (api *API) QueryD1Database(ctx context.Context, rc *ResourceContainer, params QueryD1DatabaseParams) ([]D1Result, error) { + if rc.Identifier == "" { + return []D1Result{}, ErrMissingAccountID + } + if params.DatabaseID == "" { + return []D1Result{}, ErrMissingDatabaseID + } + uri := fmt.Sprintf("/accounts/%s/d1/database/%s/query", rc.Identifier, params.DatabaseID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return []D1Result{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r QueryD1Response + err = json.Unmarshal(res, &r) + if err != nil { + return []D1Result{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/dcv_delegation.go b/vendor/github.com/cloudflare/cloudflare-go/dcv_delegation.go new file mode 100644 index 0000000..db66db2 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/dcv_delegation.go @@ -0,0 +1,41 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type DCVDelegation struct { + UUID string `json:"uuid"` +} + +// DCVDelegationResponse represents the response from the dcv_delegation/uuid endpoint. +type DCVDelegationResponse struct { + Result DCVDelegation `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type GetDCVDelegationParams struct{} + +// GetDCVDelegation gets a zone DCV Delegation UUID. +// +// API documentation: https://developers.cloudflare.com/api/operations/dcv-delegation-uuid-get +func (api *API) GetDCVDelegation(ctx context.Context, rc *ResourceContainer, params GetDCVDelegationParams) (DCVDelegation, ResultInfo, error) { + uri := fmt.Sprintf("/zones/%s/dcv_delegation/uuid", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DCVDelegation{}, ResultInfo{}, err + } + var dcvResponse DCVDelegationResponse + err = json.Unmarshal(res, &dcvResponse) + if err != nil { + return DCVDelegation{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dcvResponse.Result, dcvResponse.ResultInfo, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/device_posture_rule.go b/vendor/github.com/cloudflare/cloudflare-go/device_posture_rule.go new file mode 100644 index 0000000..fc4e065 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/device_posture_rule.go @@ -0,0 +1,340 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// DevicePostureIntegrationConfig contains authentication information +// for a device posture integration. +type DevicePostureIntegrationConfig struct { + ClientID string `json:"client_id,omitempty"` + ClientSecret string `json:"client_secret,omitempty"` + AuthUrl string `json:"auth_url,omitempty"` + ApiUrl string `json:"api_url,omitempty"` + ClientKey string `json:"client_key,omitempty"` + CustomerID string `json:"customer_id,omitempty"` + AccessClientID string `json:"access_client_id,omitempty"` + AccessClientSecret string `json:"access_client_secret,omitempty"` +} + +// DevicePostureIntegration represents a device posture integration. 
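[Editorial note, not part of the patch] GetDCVDelegation above is a single zone-scoped read. A minimal fragment, under the same assumptions as the earlier sketches and using the library's ZoneIdentifier helper, simply prints the UUID returned for the zone.

func printDCVDelegation(ctx context.Context, api *cloudflare.API, zoneID string) error {
	dcv, _, err := api.GetDCVDelegation(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.GetDCVDelegationParams{})
	if err != nil {
		return err
	}
	fmt.Println("dcv delegation uuid:", dcv.UUID)
	return nil
}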
+type DevicePostureIntegration struct { + IntegrationID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Interval string `json:"interval,omitempty"` + Config DevicePostureIntegrationConfig `json:"config,omitempty"` +} + +// DevicePostureIntegrationResponse represents the response from the get +// device posture integrations endpoint. +type DevicePostureIntegrationResponse struct { + Result DevicePostureIntegration `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// DevicePostureIntegrationListResponse represents the response from the list +// device posture integrations endpoint. +type DevicePostureIntegrationListResponse struct { + Result []DevicePostureIntegration `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// CreateDevicePostureIntegration creates a device posture integration within an account. +// +// API reference: https://api.cloudflare.com/#device-posture-integrations-create-device-posture-integration +func (api *API) CreateDevicePostureIntegration(ctx context.Context, accountID string, integration DevicePostureIntegration) (DevicePostureIntegration, error) { + uri := fmt.Sprintf("/%s/%s/devices/posture/integration", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, integration) + if err != nil { + fmt.Printf("err:%+v res:%+v\n", err, res) + return DevicePostureIntegration{}, err + } + + var devicePostureIntegrationResponse DevicePostureIntegrationResponse + err = json.Unmarshal(res, &devicePostureIntegrationResponse) + if err != nil { + return DevicePostureIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureIntegrationResponse.Result, nil +} + +// UpdateDevicePostureIntegration updates a device posture integration within an account. +// +// API reference: https://api.cloudflare.com/#device-posture-integrations-update-device-posture-integration +func (api *API) UpdateDevicePostureIntegration(ctx context.Context, accountID string, integration DevicePostureIntegration) (DevicePostureIntegration, error) { + uri := fmt.Sprintf("/%s/%s/devices/posture/integration/%s", AccountRouteRoot, accountID, integration.IntegrationID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, integration) + if err != nil { + return DevicePostureIntegration{}, err + } + + var devicePostureIntegrationResponse DevicePostureIntegrationResponse + err = json.Unmarshal(res, &devicePostureIntegrationResponse) + if err != nil { + return DevicePostureIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureIntegrationResponse.Result, nil +} + +// DevicePostureIntegration returns a specific device posture integrations within an account. 
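[Editorial note, not part of the patch] A hedged sketch of wiring up a third-party posture provider with CreateDevicePostureIntegration (same assumptions as above). The Type, Interval, and config values are illustrative placeholders only; the provider types and required config fields are defined by the API reference, not by this file.

func addPostureIntegration(ctx context.Context, api *cloudflare.API, accountID string) error {
	integration := cloudflare.DevicePostureIntegration{
		Name:     "mdm-example",
		Type:     "workspace_one", // placeholder provider type
		Interval: "10m",           // placeholder polling interval
		Config: cloudflare.DevicePostureIntegrationConfig{
			ClientID:     "client-id",
			ClientSecret: "client-secret",
			AuthUrl:      "https://auth.example.com/oauth/token",
			ApiUrl:       "https://api.example.com",
		},
	}

	created, err := api.CreateDevicePostureIntegration(ctx, accountID, integration)
	if err != nil {
		return err
	}
	fmt.Println("integration id:", created.IntegrationID)
	return nil
}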
+// +// API reference: https://api.cloudflare.com/#device-posture-integrations-device-posture-integration-details +func (api *API) DevicePostureIntegration(ctx context.Context, accountID, integrationID string) (DevicePostureIntegration, error) { + uri := fmt.Sprintf("/%s/%s/devices/posture/integration/%s", AccountRouteRoot, accountID, integrationID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DevicePostureIntegration{}, err + } + + var devicePostureIntegrationResponse DevicePostureIntegrationResponse + err = json.Unmarshal(res, &devicePostureIntegrationResponse) + if err != nil { + return DevicePostureIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureIntegrationResponse.Result, nil +} + +// DevicePostureIntegrations returns all device posture integrations within an account. +// +// API reference: https://api.cloudflare.com/#device-posture-integrations-list-device-posture-integrations +func (api *API) DevicePostureIntegrations(ctx context.Context, accountID string) ([]DevicePostureIntegration, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/devices/posture/integration", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DevicePostureIntegration{}, ResultInfo{}, err + } + + var devicePostureIntegrationListResponse DevicePostureIntegrationListResponse + err = json.Unmarshal(res, &devicePostureIntegrationListResponse) + if err != nil { + return []DevicePostureIntegration{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureIntegrationListResponse.Result, devicePostureIntegrationListResponse.ResultInfo, nil +} + +// DeleteDevicePostureIntegration deletes a device posture integration. +// +// API reference: https://api.cloudflare.com/#device-posture-integrations-delete-device-posture-integration +func (api *API) DeleteDevicePostureIntegration(ctx context.Context, accountID, ruleID string) error { + uri := fmt.Sprintf( + "/%s/%s/devices/posture/integration/%s", + AccountRouteRoot, + accountID, + ruleID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// DevicePostureRule represents a device posture rule. +type DevicePostureRule struct { + ID string `json:"id,omitempty"` + Type string `json:"type"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Schedule string `json:"schedule,omitempty"` + Match []DevicePostureRuleMatch `json:"match,omitempty"` + Input DevicePostureRuleInput `json:"input,omitempty"` + Expiration string `json:"expiration,omitempty"` +} + +// DevicePostureRuleMatch represents the conditions that the client must match to run the rule. +type DevicePostureRuleMatch struct { + Platform string `json:"platform,omitempty"` +} + +// DevicePostureRuleInput represents the value to be checked against. 
+type DevicePostureRuleInput struct { + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` + Exists *bool `json:"exists,omitempty"` + Thumbprint string `json:"thumbprint,omitempty"` + Sha256 string `json:"sha256,omitempty"` + Running *bool `json:"running,omitempty"` + RequireAll *bool `json:"requireAll,omitempty"` + CheckDisks []string `json:"checkDisks,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Version string `json:"version,omitempty"` + VersionOperator string `json:"versionOperator,omitempty"` + Overall string `json:"overall,omitempty"` + SensorConfig string `json:"sensor_config,omitempty"` + Os string `json:"os,omitempty"` + OsDistroName string `json:"os_distro_name,omitempty"` + OsDistroRevision string `json:"os_distro_revision,omitempty"` + OSVersionExtra string `json:"os_version_extra,omitempty"` + Operator string `json:"operator,omitempty"` + Domain string `json:"domain,omitempty"` + ComplianceStatus string `json:"compliance_status,omitempty"` + ConnectionID string `json:"connection_id,omitempty"` + IssueCount string `json:"issue_count,omitempty"` + CountOperator string `json:"countOperator,omitempty"` + TotalScore int `json:"total_score,omitempty"` + ScoreOperator string `json:"scoreOperator,omitempty"` + CertificateID string `json:"certificate_id,omitempty"` + CommonName string `json:"cn,omitempty"` + ActiveThreats int `json:"active_threats,omitempty"` + NetworkStatus string `json:"network_status,omitempty"` + Infected *bool `json:"infected,omitempty"` + IsActive *bool `json:"is_active,omitempty"` + EidLastSeen string `json:"eid_last_seen,omitempty"` + RiskLevel string `json:"risk_level,omitempty"` + State string `json:"state,omitempty"` + LastSeen string `json:"last_seen,omitempty"` + ExtendedKeyUsage []string `json:"extended_key_usage,omitempty"` + CheckPrivateKey *bool `json:"check_private_key,omitempty"` + Locations CertificateLocations `json:"locations,omitempty"` +} + +// Locations struct for client certificate rule v2. +type CertificateLocations struct { + Paths []string `json:"paths,omitempty"` + TrustStores []string `json:"trust_stores,omitempty"` +} + +// DevicePostureRuleListResponse represents the response from the list +// device posture rules endpoint. +type DevicePostureRuleListResponse struct { + Result []DevicePostureRule `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// DevicePostureRuleDetailResponse is the API response, containing a single +// device posture rule. +type DevicePostureRuleDetailResponse struct { + Response + Result DevicePostureRule `json:"result"` +} + +// DevicePostureRules returns all device posture rules within an account. +// +// API reference: https://api.cloudflare.com/#device-posture-rules-list-device-posture-rules +func (api *API) DevicePostureRules(ctx context.Context, accountID string) ([]DevicePostureRule, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/devices/posture", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DevicePostureRule{}, ResultInfo{}, err + } + + var devicePostureRuleListResponse DevicePostureRuleListResponse + err = json.Unmarshal(res, &devicePostureRuleListResponse) + if err != nil { + return []DevicePostureRule{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureRuleListResponse.Result, devicePostureRuleListResponse.ResultInfo, nil +} + +// DevicePostureRule returns a single device posture rule based on the rule ID. 
+// +// API reference: https://api.cloudflare.com/#device-posture-rules-device-posture-rules-details +func (api *API) DevicePostureRule(ctx context.Context, accountID, ruleID string) (DevicePostureRule, error) { + uri := fmt.Sprintf( + "/%s/%s/devices/posture/%s", + AccountRouteRoot, + accountID, + ruleID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DevicePostureRule{}, err + } + + var devicePostureRuleDetailResponse DevicePostureRuleDetailResponse + err = json.Unmarshal(res, &devicePostureRuleDetailResponse) + if err != nil { + return DevicePostureRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureRuleDetailResponse.Result, nil +} + +// CreateDevicePostureRule creates a new device posture rule. +// +// API reference: https://api.cloudflare.com/#device-posture-rules-create-device-posture-rule +func (api *API) CreateDevicePostureRule(ctx context.Context, accountID string, rule DevicePostureRule) (DevicePostureRule, error) { + uri := fmt.Sprintf("/%s/%s/devices/posture", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, rule) + if err != nil { + return DevicePostureRule{}, err + } + + var devicePostureRuleDetailResponse DevicePostureRuleDetailResponse + err = json.Unmarshal(res, &devicePostureRuleDetailResponse) + if err != nil { + return DevicePostureRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureRuleDetailResponse.Result, nil +} + +// UpdateDevicePostureRule updates an existing device posture rule. +// +// API reference: https://api.cloudflare.com/#device-posture-rules-update-device-posture-rule +func (api *API) UpdateDevicePostureRule(ctx context.Context, accountID string, rule DevicePostureRule) (DevicePostureRule, error) { + if rule.ID == "" { + return DevicePostureRule{}, fmt.Errorf("device posture rule ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/devices/posture/%s", + AccountRouteRoot, + accountID, + rule.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, rule) + if err != nil { + return DevicePostureRule{}, err + } + + var devicePostureRuleDetailResponse DevicePostureRuleDetailResponse + err = json.Unmarshal(res, &devicePostureRuleDetailResponse) + if err != nil { + return DevicePostureRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return devicePostureRuleDetailResponse.Result, nil +} + +// DeleteDevicePostureRule deletes a device posture rule. 
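[Editorial note, not part of the patch] Unlike most of the newer helpers, the posture rule functions above take a plain account ID rather than a ResourceContainer. A sketch of a minimal file-presence rule follows (same assumptions as above); the check type, platform, schedule, and path are placeholder values.

func createFilePostureRule(ctx context.Context, api *cloudflare.API, accountID string) error {
	exists := true
	rule := cloudflare.DevicePostureRule{
		Type:        "file", // placeholder check type
		Name:        "corp-agent-present",
		Description: "Require the corporate agent config to exist",
		Schedule:    "5m", // placeholder evaluation interval
		Match:       []cloudflare.DevicePostureRuleMatch{{Platform: "windows"}},
		Input: cloudflare.DevicePostureRuleInput{
			Path:   `C:\Program Files\CorpAgent\agent.conf`,
			Exists: &exists,
		},
	}

	created, err := api.CreateDevicePostureRule(ctx, accountID, rule)
	if err != nil {
		return err
	}
	fmt.Println("posture rule id:", created.ID)
	return nil
}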
+// +// API reference: https://api.cloudflare.com/#device-posture-rules-delete-device-posture-rule +func (api *API) DeleteDevicePostureRule(ctx context.Context, accountID, ruleID string) error { + uri := fmt.Sprintf( + "/%s/%s/devices/posture/%s", + AccountRouteRoot, + accountID, + ruleID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/devices_dex.go b/vendor/github.com/cloudflare/cloudflare-go/devices_dex.go new file mode 100644 index 0000000..4c9ef50 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/devices_dex.go @@ -0,0 +1,174 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type DeviceDexTestData map[string]interface{} + +type DeviceDexTest struct { + TestID string `json:"test_id"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Interval string `json:"interval"` + Enabled bool `json:"enabled"` + Updated time.Time `json:"updated"` + Created time.Time `json:"created"` + Data *DeviceDexTestData `json:"data"` +} + +type DeviceDexTests struct { + DexTests []DeviceDexTest `json:"dex_tests"` +} + +type DeviceDexTestResponse struct { + Response + Result DeviceDexTest `json:"result"` +} + +type DeviceDexTestListResponse struct { + Response + Result DeviceDexTests `json:"result"` +} + +type ListDeviceDexTestParams struct{} + +type CreateDeviceDexTestParams struct { + TestID string `json:"test_id,omitempty"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Interval string `json:"interval"` + Enabled bool `json:"enabled"` + Data *DeviceDexTestData `json:"data"` +} + +type UpdateDeviceDexTestParams struct { + TestID string `json:"test_id,omitempty"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Interval string `json:"interval"` + Enabled bool `json:"enabled"` + Data *DeviceDexTestData `json:"data"` +} + +// ListDexTests returns all Device Dex Tests for a given account. 
+// +// API reference : https://developers.cloudflare.com/api/operations/device-dex-test-details +func (api *API) ListDexTests(ctx context.Context, rc *ResourceContainer, params ListDeviceDexTestParams) (DeviceDexTests, error) { + if rc.Level != AccountRouteLevel { + return DeviceDexTests{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/dex_tests", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DeviceDexTests{}, err + } + + var response DeviceDexTestListResponse + err = json.Unmarshal(res, &response) + if err != nil { + return DeviceDexTests{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// CreateDeviceDexTest created a new Device Dex Test +// +// API reference: https://developers.cloudflare.com/api/operations/device-dex-test-create-device-dex-test +func (api *API) CreateDeviceDexTest(ctx context.Context, rc *ResourceContainer, params CreateDeviceDexTestParams) (DeviceDexTest, error) { + if rc.Level != AccountRouteLevel { + return DeviceDexTest{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/dex_tests", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return DeviceDexTest{}, err + } + + var deviceDexTestResponse DeviceDexTestResponse + if err := json.Unmarshal(res, &deviceDexTestResponse); err != nil { + return DeviceDexTest{}, fmt.Errorf("%s: %w\n\nres: %s", errUnmarshalError, err, string(res)) + } + + return deviceDexTestResponse.Result, err +} + +// UpdateDeviceDexTest Updates a Device Dex Test. +// +// API reference: https://developers.cloudflare.com/api/operations/device-dex-test-update-device-dex-test +func (api *API) UpdateDeviceDexTest(ctx context.Context, rc *ResourceContainer, params UpdateDeviceDexTestParams) (DeviceDexTest, error) { + if rc.Level != AccountRouteLevel { + return DeviceDexTest{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/dex_tests/%s", rc.Level, rc.Identifier, params.TestID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return DeviceDexTest{}, err + } + + var deviceDexTestsResponse DeviceDexTestResponse + + if err := json.Unmarshal(res, &deviceDexTestsResponse); err != nil { + return DeviceDexTest{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return deviceDexTestsResponse.Result, err +} + +// GetDeviceDexTest gets a single Device Dex Test. +// +// API reference: https://developers.cloudflare.com/api/operations/device-dex-test-get-device-dex-test +func (api *API) GetDeviceDexTest(ctx context.Context, rc *ResourceContainer, testID string) (DeviceDexTest, error) { + if rc.Level != AccountRouteLevel { + return DeviceDexTest{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/dex_tests/%s", rc.Level, rc.Identifier, testID) + + deviceDexTestResponse := DeviceDexTestResponse{} + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DeviceDexTest{}, err + } + + if err := json.Unmarshal(res, &deviceDexTestResponse); err != nil { + return DeviceDexTest{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return deviceDexTestResponse.Result, err +} + +// DeleteDexTest deletes a Device Dex Test. 
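[Editorial note, not part of the patch] CreateDeviceDexTest takes a free-form Data map (DeviceDexTestData); the keys and the interval format used below are placeholders for an HTTP-style test and are not defined in this file. Same assumptions as the earlier sketches.

func createDexHTTPTest(ctx context.Context, api *cloudflare.API, accountID string) error {
	data := cloudflare.DeviceDexTestData{
		"kind":   "http",                        // placeholder test kind
		"method": "GET",                         // placeholder
		"host":   "https://status.example.com/", // placeholder target
	}

	test, err := api.CreateDeviceDexTest(ctx, cloudflare.AccountIdentifier(accountID), cloudflare.CreateDeviceDexTestParams{
		Name:        "status-page-reachability",
		Description: "Synthetic HTTP check from managed devices",
		Interval:    "0h30m0s", // placeholder interval format
		Enabled:     true,
		Data:        &data,
	})
	if err != nil {
		return err
	}
	fmt.Println("dex test id:", test.TestID)
	return nil
}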
+// +// API reference: https://developers.cloudflare.com/api/operations/device-dex-test-delete-device-dex-test +func (api *API) DeleteDexTest(ctx context.Context, rc *ResourceContainer, testID string) (DeviceDexTests, error) { + if rc.Level != AccountRouteLevel { + return DeviceDexTests{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/dex_tests/%s", rc.Level, rc.Identifier, testID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return DeviceDexTests{}, err + } + + var response DeviceDexTestListResponse + if err := json.Unmarshal(res, &response); err != nil { + return DeviceDexTests{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, err +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/devices_managed_networks.go b/vendor/github.com/cloudflare/cloudflare-go/devices_managed_networks.go new file mode 100644 index 0000000..11fc7ea --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/devices_managed_networks.go @@ -0,0 +1,165 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type Config struct { + TlsSockAddr string `json:"tls_sockaddr,omitempty"` + Sha256 string `json:"sha256,omitempty"` +} + +type DeviceManagedNetwork struct { + NetworkID string `json:"network_id,omitempty"` + Type string `json:"type"` + Name string `json:"name"` + Config *Config `json:"config"` +} + +type DeviceManagedNetworkResponse struct { + Response + Result DeviceManagedNetwork `json:"result"` +} + +type DeviceManagedNetworkListResponse struct { + Response + Result []DeviceManagedNetwork `json:"result"` +} + +type ListDeviceManagedNetworksParams struct{} + +type CreateDeviceManagedNetworkParams struct { + NetworkID string `json:"network_id,omitempty"` + Type string `json:"type"` + Name string `json:"name"` + Config *Config `json:"config"` +} + +type UpdateDeviceManagedNetworkParams struct { + NetworkID string `json:"network_id,omitempty"` + Type string `json:"type"` + Name string `json:"name"` + Config *Config `json:"config"` +} + +// ListDeviceManagedNetwork returns all Device Managed Networks for a given +// account. +// +// API reference : https://api.cloudflare.com/#device-managed-networks-list-device-managed-networks +func (api *API) ListDeviceManagedNetworks(ctx context.Context, rc *ResourceContainer, params ListDeviceManagedNetworksParams) ([]DeviceManagedNetwork, error) { + if rc.Level != AccountRouteLevel { + return []DeviceManagedNetwork{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/networks", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DeviceManagedNetwork{}, err + } + + var response DeviceManagedNetworkListResponse + err = json.Unmarshal(res, &response) + if err != nil { + return []DeviceManagedNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// CreateDeviceManagedNetwork creates a new Device Managed Network. 
+// +// API reference: https://api.cloudflare.com/#device-managed-networks-create-device-managed-network +func (api *API) CreateDeviceManagedNetwork(ctx context.Context, rc *ResourceContainer, params CreateDeviceManagedNetworkParams) (DeviceManagedNetwork, error) { + if rc.Level != AccountRouteLevel { + return DeviceManagedNetwork{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/networks", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return DeviceManagedNetwork{}, err + } + + var deviceManagedNetworksResponse DeviceManagedNetworkResponse + if err := json.Unmarshal(res, &deviceManagedNetworksResponse); err != nil { + return DeviceManagedNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return deviceManagedNetworksResponse.Result, err +} + +// UpdateDeviceManagedNetwork Update a Device Managed Network. +// +// API reference: https://api.cloudflare.com/#device-managed-networks-update-device-managed-network +func (api *API) UpdateDeviceManagedNetwork(ctx context.Context, rc *ResourceContainer, params UpdateDeviceManagedNetworkParams) (DeviceManagedNetwork, error) { + if rc.Level != AccountRouteLevel { + return DeviceManagedNetwork{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/networks/%s", rc.Level, rc.Identifier, params.NetworkID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return DeviceManagedNetwork{}, err + } + + var deviceManagedNetworksResponse DeviceManagedNetworkResponse + + if err := json.Unmarshal(res, &deviceManagedNetworksResponse); err != nil { + return DeviceManagedNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return deviceManagedNetworksResponse.Result, err +} + +// GetDeviceManagedNetwork gets a single Device Managed Network. +// +// API reference: https://api.cloudflare.com/#device-managed-networks-device-managed-network-details +func (api *API) GetDeviceManagedNetwork(ctx context.Context, rc *ResourceContainer, networkID string) (DeviceManagedNetwork, error) { + if rc.Level != AccountRouteLevel { + return DeviceManagedNetwork{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/networks/%s", rc.Level, rc.Identifier, networkID) + + deviceManagedNetworksResponse := DeviceManagedNetworkResponse{} + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DeviceManagedNetwork{}, err + } + + if err := json.Unmarshal(res, &deviceManagedNetworksResponse); err != nil { + return DeviceManagedNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return deviceManagedNetworksResponse.Result, err +} + +// DeleteManagedNetworks deletes a Device Managed Network. 
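[Editorial note, not part of the patch] A sketch of registering a managed network with CreateDeviceManagedNetwork (same assumptions as above). The network type string and the TLS beacon address/fingerprint in Config are placeholders.

func addManagedNetwork(ctx context.Context, api *cloudflare.API, accountID string) error {
	network, err := api.CreateDeviceManagedNetwork(ctx, cloudflare.AccountIdentifier(accountID), cloudflare.CreateDeviceManagedNetworkParams{
		Type: "tls", // placeholder network type
		Name: "office-lan",
		Config: &cloudflare.Config{
			TlsSockAddr: "192.0.2.10:443",          // placeholder beacon address
			Sha256:      "placeholder-cert-sha256", // placeholder certificate fingerprint
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("network id:", network.NetworkID)
	return nil
}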
+// +// API reference: https://api.cloudflare.com/#device-managed-networks-delete-device-managed-network +func (api *API) DeleteManagedNetworks(ctx context.Context, rc *ResourceContainer, networkID string) ([]DeviceManagedNetwork, error) { + if rc.Level != AccountRouteLevel { + return []DeviceManagedNetwork{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/devices/networks/%s", rc.Level, rc.Identifier, networkID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return []DeviceManagedNetwork{}, err + } + + var response DeviceManagedNetworkListResponse + if err := json.Unmarshal(res, &response); err != nil { + return []DeviceManagedNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, err +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/devices_policy.go b/vendor/github.com/cloudflare/cloudflare-go/devices_policy.go new file mode 100644 index 0000000..0da365d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/devices_policy.go @@ -0,0 +1,381 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type Enabled struct { + Enabled bool `json:"enabled"` +} + +// DeviceClientCertificates identifies if the zero trust zone is configured for an account. +type DeviceClientCertificates struct { + Response + Result Enabled +} + +type ServiceMode string + +const ( + oneDotOne ServiceMode = "1dot1" + warp ServiceMode = "warp" + proxy ServiceMode = "proxy" + postureOnly ServiceMode = "posture_only" + warpTunnelOnly ServiceMode = "warp_tunnel_only" + + listDeviceSettingsPoliciesDefaultPageSize = 20 +) + +type ServiceModeV2 struct { + Mode ServiceMode `json:"mode,omitempty"` + Port int `json:"port,omitempty"` +} + +type DeviceSettingsPolicy struct { + ServiceModeV2 *ServiceModeV2 `json:"service_mode_v2"` + DisableAutoFallback *bool `json:"disable_auto_fallback"` + FallbackDomains *[]FallbackDomain `json:"fallback_domains"` + Include *[]SplitTunnel `json:"include"` + Exclude *[]SplitTunnel `json:"exclude"` + GatewayUniqueID *string `json:"gateway_unique_id"` + SupportURL *string `json:"support_url"` + CaptivePortal *int `json:"captive_portal"` + AllowModeSwitch *bool `json:"allow_mode_switch"` + SwitchLocked *bool `json:"switch_locked"` + AllowUpdates *bool `json:"allow_updates"` + AutoConnect *int `json:"auto_connect"` + AllowedToLeave *bool `json:"allowed_to_leave"` + PolicyID *string `json:"policy_id"` + Enabled *bool `json:"enabled"` + Name *string `json:"name"` + Match *string `json:"match"` + Precedence *int `json:"precedence"` + Default bool `json:"default"` + ExcludeOfficeIps *bool `json:"exclude_office_ips"` + Description *string `json:"description"` + LANAllowMinutes *uint `json:"lan_allow_minutes"` + LANAllowSubnetSize *uint `json:"lan_allow_subnet_size"` + TunnelProtocol *string `json:"tunnel_protocol"` +} + +type DeviceSettingsPolicyResponse struct { + Response + Result DeviceSettingsPolicy +} + +type DeleteDeviceSettingsPolicyResponse struct { + Response + Result []DeviceSettingsPolicy +} + +type CreateDeviceSettingsPolicyParams struct { + DisableAutoFallback *bool `json:"disable_auto_fallback,omitempty"` + CaptivePortal *int `json:"captive_portal,omitempty"` + AllowModeSwitch *bool `json:"allow_mode_switch,omitempty"` + SwitchLocked *bool `json:"switch_locked,omitempty"` + AllowUpdates *bool `json:"allow_updates,omitempty"` + AutoConnect *int `json:"auto_connect,omitempty"` + AllowedToLeave *bool 
`json:"allowed_to_leave,omitempty"` + SupportURL *string `json:"support_url,omitempty"` + ServiceModeV2 *ServiceModeV2 `json:"service_mode_v2,omitempty"` + Precedence *int `json:"precedence,omitempty"` + Name *string `json:"name,omitempty"` + Match *string `json:"match,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + ExcludeOfficeIps *bool `json:"exclude_office_ips"` + Description *string `json:"description,omitempty"` + LANAllowMinutes *uint `json:"lan_allow_minutes,omitempty"` + LANAllowSubnetSize *uint `json:"lan_allow_subnet_size,omitempty"` + TunnelProtocol *string `json:"tunnel_protocol,omitempty"` +} + +type UpdateDefaultDeviceSettingsPolicyParams struct { + DisableAutoFallback *bool `json:"disable_auto_fallback,omitempty"` + CaptivePortal *int `json:"captive_portal,omitempty"` + AllowModeSwitch *bool `json:"allow_mode_switch,omitempty"` + SwitchLocked *bool `json:"switch_locked,omitempty"` + AllowUpdates *bool `json:"allow_updates,omitempty"` + AutoConnect *int `json:"auto_connect,omitempty"` + AllowedToLeave *bool `json:"allowed_to_leave,omitempty"` + SupportURL *string `json:"support_url,omitempty"` + ServiceModeV2 *ServiceModeV2 `json:"service_mode_v2,omitempty"` + Precedence *int `json:"precedence,omitempty"` + Name *string `json:"name,omitempty"` + Match *string `json:"match,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + ExcludeOfficeIps *bool `json:"exclude_office_ips"` + Description *string `json:"description,omitempty"` + LANAllowMinutes *uint `json:"lan_allow_minutes,omitempty"` + LANAllowSubnetSize *uint `json:"lan_allow_subnet_size,omitempty"` + TunnelProtocol *string `json:"tunnel_protocol,omitempty"` +} + +type UpdateDeviceSettingsPolicyParams struct { + PolicyID *string `json:"-"` + DisableAutoFallback *bool `json:"disable_auto_fallback,omitempty"` + CaptivePortal *int `json:"captive_portal,omitempty"` + AllowModeSwitch *bool `json:"allow_mode_switch,omitempty"` + SwitchLocked *bool `json:"switch_locked,omitempty"` + AllowUpdates *bool `json:"allow_updates,omitempty"` + AutoConnect *int `json:"auto_connect,omitempty"` + AllowedToLeave *bool `json:"allowed_to_leave,omitempty"` + SupportURL *string `json:"support_url,omitempty"` + ServiceModeV2 *ServiceModeV2 `json:"service_mode_v2,omitempty"` + Precedence *int `json:"precedence,omitempty"` + Name *string `json:"name,omitempty"` + Match *string `json:"match,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + ExcludeOfficeIps *bool `json:"exclude_office_ips"` + Description *string `json:"description,omitempty"` + LANAllowMinutes *uint `json:"lan_allow_minutes,omitempty"` + LANAllowSubnetSize *uint `json:"lan_allow_subnet_size,omitempty"` + TunnelProtocol *string `json:"tunnel_protocol,omitempty"` +} + +type ListDeviceSettingsPoliciesResponse struct { + Response + ResultInfo ResultInfo `json:"result_info"` + Result []DeviceSettingsPolicy `json:"result"` +} + +type UpdateDeviceClientCertificatesParams struct { + Enabled *bool `json:"enabled"` +} + +type GetDeviceClientCertificatesParams struct{} + +type GetDefaultDeviceSettingsPolicyParams struct{} + +type GetDeviceSettingsPolicyParams struct { + PolicyID *string `json:"-"` +} + +// UpdateDeviceClientCertificates controls the zero trust zone used to provision client certificates. 
+// +// API reference: https://api.cloudflare.com/#device-client-certificates +func (api *API) UpdateDeviceClientCertificates(ctx context.Context, rc *ResourceContainer, params UpdateDeviceClientCertificatesParams) (DeviceClientCertificates, error) { + if rc.Level != ZoneRouteLevel { + return DeviceClientCertificates{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy/certificates", rc.Level, rc.Identifier) + + result := DeviceClientCertificates{Result: Enabled{false}} + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return result, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// GetDeviceClientCertificates controls the zero trust zone used to provision +// client certificates. +// +// API reference: https://api.cloudflare.com/#device-client-certificates +func (api *API) GetDeviceClientCertificates(ctx context.Context, rc *ResourceContainer, params GetDeviceClientCertificatesParams) (DeviceClientCertificates, error) { + if rc.Level != ZoneRouteLevel { + return DeviceClientCertificates{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy/certificates", rc.Level, rc.Identifier) + + result := DeviceClientCertificates{} + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return result, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// CreateDeviceSettingsPolicy creates a settings policy against devices that +// match the policy. +// +// API reference: https://api.cloudflare.com/#devices-create-device-settings-policy +func (api *API) CreateDeviceSettingsPolicy(ctx context.Context, rc *ResourceContainer, params CreateDeviceSettingsPolicyParams) (DeviceSettingsPolicy, error) { + if rc.Level != AccountRouteLevel { + return DeviceSettingsPolicy{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy", rc.Level, rc.Identifier) + + result := DeviceSettingsPolicyResponse{} + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return DeviceSettingsPolicy{}, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return DeviceSettingsPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, err +} + +// UpdateDefaultDeviceSettingsPolicy updates the default settings policy for an account +// +// API reference: https://api.cloudflare.com/#devices-update-default-device-settings-policy +func (api *API) UpdateDefaultDeviceSettingsPolicy(ctx context.Context, rc *ResourceContainer, params UpdateDefaultDeviceSettingsPolicyParams) (DeviceSettingsPolicy, error) { + if rc.Level != AccountRouteLevel { + return DeviceSettingsPolicy{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + result := DeviceSettingsPolicyResponse{} + uri := fmt.Sprintf("/%s/%s/devices/policy", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return DeviceSettingsPolicy{}, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return DeviceSettingsPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, err +} + +// UpdateDeviceSettingsPolicy updates a settings policy +// +// API 
reference: https://api.cloudflare.com/#devices-update-device-settings-policy +func (api *API) UpdateDeviceSettingsPolicy(ctx context.Context, rc *ResourceContainer, params UpdateDeviceSettingsPolicyParams) (DeviceSettingsPolicy, error) { + if rc.Level != AccountRouteLevel { + return DeviceSettingsPolicy{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy/%s", rc.Level, rc.Identifier, *params.PolicyID) + + result := DeviceSettingsPolicyResponse{} + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return DeviceSettingsPolicy{}, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return DeviceSettingsPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, err +} + +// DeleteDeviceSettingsPolicy deletes a settings policy and returns a list +// of all of the other policies in the account. +// +// API reference: https://api.cloudflare.com/#devices-delete-device-settings-policy +func (api *API) DeleteDeviceSettingsPolicy(ctx context.Context, rc *ResourceContainer, policyID string) ([]DeviceSettingsPolicy, error) { + if rc.Level != AccountRouteLevel { + return []DeviceSettingsPolicy{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy/%s", rc.Level, rc.Identifier, policyID) + + result := DeleteDeviceSettingsPolicyResponse{} + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return []DeviceSettingsPolicy{}, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return []DeviceSettingsPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, err +} + +// GetDefaultDeviceSettings gets the default device settings policy. +// +// API reference: https://api.cloudflare.com/#devices-get-default-device-settings-policy +func (api *API) GetDefaultDeviceSettingsPolicy(ctx context.Context, rc *ResourceContainer, params GetDefaultDeviceSettingsPolicyParams) (DeviceSettingsPolicy, error) { + if rc.Level != AccountRouteLevel { + return DeviceSettingsPolicy{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy", rc.Level, rc.Identifier) + + result := DeviceSettingsPolicyResponse{} + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DeviceSettingsPolicy{}, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return DeviceSettingsPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, err +} + +// GetDefaultDeviceSettings gets the device settings policy by its policyID. 
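[Editorial note, not part of the patch] Because CreateDeviceSettingsPolicyParams uses pointer fields throughout, callers typically take the address of local variables (or use the package's pointer helpers). The sketch below, under the same assumptions as above, creates a narrowly scoped policy; the match expression syntax and precedence value are placeholders.

func createContractorPolicy(ctx context.Context, api *cloudflare.API, accountID string) error {
	var (
		name       = "contractors"
		match      = `identity.email matches ".*@contractor.example.com"` // placeholder match expression
		precedence = 100
		enabled    = true
	)

	policy, err := api.CreateDeviceSettingsPolicy(ctx, cloudflare.AccountIdentifier(accountID), cloudflare.CreateDeviceSettingsPolicyParams{
		Name:       &name,
		Match:      &match,
		Precedence: &precedence,
		Enabled:    &enabled,
	})
	if err != nil {
		return err
	}
	if policy.PolicyID != nil {
		fmt.Println("policy id:", *policy.PolicyID)
	}
	return nil
}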
+// +// API reference: https://api.cloudflare.com/#devices-get-device-settings-policy-by-id +func (api *API) GetDeviceSettingsPolicy(ctx context.Context, rc *ResourceContainer, params GetDeviceSettingsPolicyParams) (DeviceSettingsPolicy, error) { + if rc.Level != AccountRouteLevel { + return DeviceSettingsPolicy{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/devices/policy/%s", rc.Level, rc.Identifier, *params.PolicyID) + + result := DeviceSettingsPolicyResponse{} + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DeviceSettingsPolicy{}, err + } + + if err := json.Unmarshal(res, &result); err != nil { + return DeviceSettingsPolicy{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, err +} + +type ListDeviceSettingsPoliciesParams struct { + ResultInfo +} + +// ListDeviceSettingsPolicies returns all device settings policies for an account +// +// API reference: https://api.cloudflare.com/#devices-list-device-settings-policies +func (api *API) ListDeviceSettingsPolicies(ctx context.Context, rc *ResourceContainer, params ListDeviceSettingsPoliciesParams) ([]DeviceSettingsPolicy, *ResultInfo, error) { + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = listDeviceSettingsPoliciesDefaultPageSize + } + + var policies []DeviceSettingsPolicy + var lastResultInfo ResultInfo + for { + uri := buildURI(fmt.Sprintf("/%s/%s/devices/policies", rc.Level, rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, nil, err + } + var r ListDeviceSettingsPoliciesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + policies = append(policies, r.Result...) + lastResultInfo = r.ResultInfo + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + return policies, &lastResultInfo, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/diagnostics.go b/vendor/github.com/cloudflare/cloudflare-go/diagnostics.go new file mode 100644 index 0000000..4eb8e71 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/diagnostics.go @@ -0,0 +1,102 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// DiagnosticsTracerouteConfiguration is the overarching structure of the +// diagnostics traceroute requests. +type DiagnosticsTracerouteConfiguration struct { + Targets []string `json:"targets"` + Colos []string `json:"colos,omitempty"` + Options DiagnosticsTracerouteConfigurationOptions `json:"options,omitempty"` +} + +// DiagnosticsTracerouteConfigurationOptions contains the options for performing +// traceroutes. +type DiagnosticsTracerouteConfigurationOptions struct { + PacketsPerTTL int `json:"packets_per_ttl"` + PacketType string `json:"packet_type"` + MaxTTL int `json:"max_ttl"` + WaitTime int `json:"wait_time"` +} + +// DiagnosticsTracerouteResponse is the outer response of the API response. +type DiagnosticsTracerouteResponse struct { + Response + Result []DiagnosticsTracerouteResponseResult `json:"result"` +} + +// DiagnosticsTracerouteResponseResult is the inner API response for the +// traceroute request. 
+type DiagnosticsTracerouteResponseResult struct { + Target string `json:"target"` + Colos []DiagnosticsTracerouteResponseColos `json:"colos"` +} + +// DiagnosticsTracerouteResponseColo contains the Name and City of a colocation. +type DiagnosticsTracerouteResponseColo struct { + Name string `json:"name"` + City string `json:"city"` +} + +// DiagnosticsTracerouteResponseNodes holds a summary of nodes contacted in the +// traceroute. +type DiagnosticsTracerouteResponseNodes struct { + Asn string `json:"asn"` + IP string `json:"ip"` + Name string `json:"name"` + PacketCount int `json:"packet_count"` + MeanRttMs float64 `json:"mean_rtt_ms"` + StdDevRttMs float64 `json:"std_dev_rtt_ms"` + MinRttMs float64 `json:"min_rtt_ms"` + MaxRttMs float64 `json:"max_rtt_ms"` +} + +// DiagnosticsTracerouteResponseHops holds packet and node information of the +// hops. +type DiagnosticsTracerouteResponseHops struct { + PacketsTTL int `json:"packets_ttl"` + PacketsSent int `json:"packets_sent"` + PacketsLost int `json:"packets_lost"` + Nodes []DiagnosticsTracerouteResponseNodes `json:"nodes"` +} + +// DiagnosticsTracerouteResponseColos is the summary struct of a colocation test. +type DiagnosticsTracerouteResponseColos struct { + Error string `json:"error"` + Colo DiagnosticsTracerouteResponseColo `json:"colo"` + TracerouteTimeMs int `json:"traceroute_time_ms"` + TargetSummary DiagnosticsTracerouteResponseNodes `json:"target_summary"` + Hops []DiagnosticsTracerouteResponseHops `json:"hops"` +} + +// PerformTraceroute initiates a traceroute from the Cloudflare network to the +// requested targets. +// +// API documentation: https://api.cloudflare.com/#diagnostics-traceroute +func (api *API) PerformTraceroute(ctx context.Context, accountID string, targets, colos []string, tracerouteOptions DiagnosticsTracerouteConfigurationOptions) ([]DiagnosticsTracerouteResponseResult, error) { + uri := fmt.Sprintf("/accounts/%s/diagnostics/traceroute", accountID) + diagnosticsPayload := DiagnosticsTracerouteConfiguration{ + Targets: targets, + Colos: colos, + Options: tracerouteOptions, + } + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, diagnosticsPayload) + if err != nil { + return []DiagnosticsTracerouteResponseResult{}, err + } + + var diagnosticsResponse DiagnosticsTracerouteResponse + err = json.Unmarshal(res, &diagnosticsResponse) + if err != nil { + return []DiagnosticsTracerouteResponseResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return diagnosticsResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go b/vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go new file mode 100644 index 0000000..09cf864 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go @@ -0,0 +1,278 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingDatasetID = errors.New("missing required dataset ID") +) + +// DLPDatasetUpload represents a single upload version attached to a DLP dataset. +type DLPDatasetUpload struct { + NumCells int `json:"num_cells"` + Status string `json:"status,omitempty"` + Version int `json:"version"` +} + +// DLPDataset represents a DLP Exact Data Match dataset or Custom Word List. 
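[Editorial note, not part of the patch] PerformTraceroute above takes the targets, an optional colo filter, and the option block as direct arguments rather than a params struct. A fragment under the same assumptions as the earlier sketches; the target IP and packet options are placeholders.

func runTraceroute(ctx context.Context, api *cloudflare.API, accountID string) error {
	opts := cloudflare.DiagnosticsTracerouteConfigurationOptions{
		PacketsPerTTL: 1,
		PacketType:    "icmp", // placeholder packet type
		MaxTTL:        15,
		WaitTime:      1,
	}

	// nil colos leaves the colo filter unset (it is omitempty in the request payload).
	results, err := api.PerformTraceroute(ctx, accountID, []string{"203.0.113.1"}, nil, opts)
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Printf("target %s probed from %d colos\n", r.Target, len(r.Colos))
	}
	return nil
}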
+type DLPDataset struct { + CreatedAt *time.Time `json:"created_at,omitempty"` + Description string `json:"description,omitempty"` + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + NumCells int `json:"num_cells"` + Secret *bool `json:"secret,omitempty"` + Status string `json:"status,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` + Uploads []DLPDatasetUpload `json:"uploads"` +} + +type ListDLPDatasetsParams struct{} + +type DLPDatasetListResponse struct { + Result []DLPDataset `json:"result"` + Response +} + +// ListDLPDatasets returns all the DLP datasets associated with an account. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-read-all +func (api *API) ListDLPDatasets(ctx context.Context, rc *ResourceContainer, params ListDLPDatasetsParams) ([]DLPDataset, error) { + if rc.Identifier == "" { + return nil, nil + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var dlpDatasetListResponse DLPDatasetListResponse + err = json.Unmarshal(res, &dlpDatasetListResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetListResponse.Result, nil +} + +type DLPDatasetGetResponse struct { + Result DLPDataset `json:"result"` + Response +} + +// GetDLPDataset returns a DLP dataset based on the dataset ID. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-read +func (api *API) GetDLPDataset(ctx context.Context, rc *ResourceContainer, datasetID string) (DLPDataset, error) { + if rc.Identifier == "" { + return DLPDataset{}, nil + } + + if datasetID == "" { + return DLPDataset{}, ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s", rc.Level, rc.Identifier, datasetID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DLPDataset{}, err + } + + var dlpDatasetGetResponse DLPDatasetGetResponse + err = json.Unmarshal(res, &dlpDatasetGetResponse) + if err != nil { + return DLPDataset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetGetResponse.Result, nil +} + +type CreateDLPDatasetParams struct { + Description string `json:"description,omitempty"` + Name string `json:"name"` + Secret *bool `json:"secret,omitempty"` +} + +type CreateDLPDatasetResult struct { + MaxCells int `json:"max_cells"` + Secret string `json:"secret"` + Version int `json:"version"` + Dataset DLPDataset `json:"dataset"` +} + +type CreateDLPDatasetResponse struct { + Result CreateDLPDatasetResult `json:"result"` + Response +} + +// CreateDLPDataset creates a DLP dataset. 
+// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-create +func (api *API) CreateDLPDataset(ctx context.Context, rc *ResourceContainer, params CreateDLPDatasetParams) (CreateDLPDatasetResult, error) { + if rc.Identifier == "" { + return CreateDLPDatasetResult{}, nil + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return CreateDLPDatasetResult{}, err + } + + var CreateDLPDatasetResponse CreateDLPDatasetResponse + err = json.Unmarshal(res, &CreateDLPDatasetResponse) + if err != nil { + return CreateDLPDatasetResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return CreateDLPDatasetResponse.Result, nil +} + +// DeleteDLPDataset deletes a DLP dataset. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-delete +func (api *API) DeleteDLPDataset(ctx context.Context, rc *ResourceContainer, datasetID string) error { + if rc.Identifier == "" { + return ErrMissingResourceIdentifier + } + + if datasetID == "" { + return ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s", rc.Level, rc.Identifier, datasetID), nil) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + return err +} + +type UpdateDLPDatasetParams struct { + DatasetID string + Description *string `json:"description,omitempty"` // nil to leave descrption as-is + Name *string `json:"name,omitempty"` // nil to leave name as-is +} + +type UpdateDLPDatasetResponse struct { + Result DLPDataset `json:"result"` + Response +} + +// UpdateDLPDataset updates the details of a DLP dataset. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-update +func (api *API) UpdateDLPDataset(ctx context.Context, rc *ResourceContainer, params UpdateDLPDatasetParams) (DLPDataset, error) { + if rc.Identifier == "" { + return DLPDataset{}, nil + } + + if params.DatasetID == "" { + return DLPDataset{}, ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s", rc.Level, rc.Identifier, params.DatasetID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return DLPDataset{}, err + } + + var updateDLPDatasetResponse UpdateDLPDatasetResponse + err = json.Unmarshal(res, &updateDLPDatasetResponse) + if err != nil { + return DLPDataset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return updateDLPDatasetResponse.Result, nil +} + +type CreateDLPDatasetUploadResult struct { + MaxCells int `json:"max_cells"` + Secret string `json:"secret"` + Version int `json:"version"` +} + +type CreateDLPDatasetUploadResponse struct { + Result CreateDLPDatasetUploadResult `json:"result"` + Response +} +type CreateDLPDatasetUploadParams struct { + DatasetID string +} + +// CreateDLPDatasetUpload creates a new upload version for the specified DLP dataset. 
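+//
+// Illustrative two-step upload sketch (assumed names: api, ctx, rc, datasetID;
+// the plain-text body shown is only a placeholder, the real payload format
+// depends on the dataset type): first reserve a new version, then upload the
+// data for that version with UploadDLPDatasetVersion.
+//
+//	upload, err := api.CreateDLPDatasetUpload(ctx, rc,
+//		cloudflare.CreateDLPDatasetUploadParams{DatasetID: datasetID})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_, err = api.UploadDLPDatasetVersion(ctx, rc, cloudflare.UploadDLPDatasetVersionParams{
+//		DatasetID: datasetID,
+//		Version:   upload.Version,
+//		Body:      "alice@example.com\nbob@example.com\n",
+//	})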
+// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-create-version +func (api *API) CreateDLPDatasetUpload(ctx context.Context, rc *ResourceContainer, params CreateDLPDatasetUploadParams) (CreateDLPDatasetUploadResult, error) { + if rc.Identifier == "" { + return CreateDLPDatasetUploadResult{}, nil + } + + if params.DatasetID == "" { + return CreateDLPDatasetUploadResult{}, ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s/upload", rc.Level, rc.Identifier, params.DatasetID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return CreateDLPDatasetUploadResult{}, err + } + + var dlpDatasetCreateUploadResponse CreateDLPDatasetUploadResponse + err = json.Unmarshal(res, &dlpDatasetCreateUploadResponse) + if err != nil { + return CreateDLPDatasetUploadResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetCreateUploadResponse.Result, nil +} + +type UploadDLPDatasetVersionParams struct { + DatasetID string + Version int + Body interface{} +} + +type UploadDLPDatasetVersionResponse struct { + Result DLPDataset `json:"result"` + Response +} + +// UploadDLPDatasetVersion uploads a new version of the specified DLP dataset. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-upload-version +func (api *API) UploadDLPDatasetVersion(ctx context.Context, rc *ResourceContainer, params UploadDLPDatasetVersionParams) (DLPDataset, error) { + if rc.Identifier == "" { + return DLPDataset{}, nil + } + + if params.DatasetID == "" { + return DLPDataset{}, ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s/upload/%d", rc.Level, rc.Identifier, params.DatasetID, params.Version), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Body) + if err != nil { + return DLPDataset{}, err + } + + var dlpDatasetUploadVersionResponse UploadDLPDatasetVersionResponse + err = json.Unmarshal(res, &dlpDatasetUploadVersionResponse) + if err != nil { + return DLPDataset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetUploadVersionResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/dlp_payload_log.go b/vendor/github.com/cloudflare/cloudflare-go/dlp_payload_log.go new file mode 100644 index 0000000..80123b4 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/dlp_payload_log.go @@ -0,0 +1,72 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type DLPPayloadLogSettings struct { + PublicKey string `json:"public_key,omitempty"` + + // Only present in responses + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type GetDLPPayloadLogSettingsParams struct{} + +type DLPPayloadLogSettingsResponse struct { + Response + Result DLPPayloadLogSettings `json:"result"` +} + +// GetDLPPayloadLogSettings gets the current DLP payload logging settings. 
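+//
+// Illustrative usage sketch (assumed names: api, ctx, accountID; the public
+// key value is a placeholder): read the current settings, then rotate the key
+// with UpdateDLPPayloadLogSettings.
+//
+//	rc := cloudflare.AccountIdentifier(accountID)
+//	current, err := api.GetDLPPayloadLogSettings(ctx, rc, cloudflare.GetDLPPayloadLogSettingsParams{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("current key:", current.PublicKey)
+//	_, err = api.UpdateDLPPayloadLogSettings(ctx, rc, cloudflare.DLPPayloadLogSettings{
+//		PublicKey: "base64-encoded-public-key",
+//	})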
+// +// API reference: https://api.cloudflare.com/#dlp-payload-log-settings-get-settings +func (api *API) GetDLPPayloadLogSettings(ctx context.Context, rc *ResourceContainer, params GetDLPPayloadLogSettingsParams) (DLPPayloadLogSettings, error) { + if rc.Identifier == "" { + return DLPPayloadLogSettings{}, ErrMissingResourceIdentifier + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/payload_log", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DLPPayloadLogSettings{}, err + } + + var dlpPayloadLogSettingsResponse DLPPayloadLogSettingsResponse + err = json.Unmarshal(res, &dlpPayloadLogSettingsResponse) + if err != nil { + return DLPPayloadLogSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpPayloadLogSettingsResponse.Result, nil +} + +// UpdateDLPPayloadLogSettings sets the current DLP payload logging settings to new values. +// +// API reference: https://api.cloudflare.com/#dlp-payload-log-settings-update-settings +func (api *API) UpdateDLPPayloadLogSettings(ctx context.Context, rc *ResourceContainer, settings DLPPayloadLogSettings) (DLPPayloadLogSettings, error) { + if rc.Identifier == "" { + return DLPPayloadLogSettings{}, ErrMissingResourceIdentifier + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/payload_log", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, settings) + if err != nil { + return DLPPayloadLogSettings{}, err + } + + var dlpPayloadLogSettingsResponse DLPPayloadLogSettingsResponse + err = json.Unmarshal(res, &dlpPayloadLogSettingsResponse) + if err != nil { + return DLPPayloadLogSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpPayloadLogSettingsResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/dlp_profile.go b/vendor/github.com/cloudflare/cloudflare-go/dlp_profile.go new file mode 100644 index 0000000..25e78e9 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/dlp_profile.go @@ -0,0 +1,234 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingProfileID = errors.New("missing required profile ID") +) + +// DLPPattern represents a DLP Pattern that matches an entry. +type DLPPattern struct { + Regex string `json:"regex,omitempty"` + Validation string `json:"validation,omitempty"` +} + +// DLPEntry represents a DLP Entry, which can be matched in HTTP bodies or files. +type DLPEntry struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + ProfileID string `json:"profile_id,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Type string `json:"type,omitempty"` + + // The following fields are only present for custom entries. + + Pattern *DLPPattern `json:"pattern,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// Content types to exclude from context analysis and return all matches. +type DLPContextAwarenessSkip struct { + // Return all matches, regardless of context analysis result, if the data is a file. + Files *bool `json:"files,omitempty"` +} + +// Scan the context of predefined entries to only return matches surrounded by keywords. +type DLPContextAwareness struct { + Enabled *bool `json:"enabled,omitempty"` + Skip DLPContextAwarenessSkip `json:"skip"` +} + +// DLPProfile represents a DLP Profile, which contains a set +// of entries. 
+type DLPProfile struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + AllowedMatchCount int `json:"allowed_match_count"` + OCREnabled *bool `json:"ocr_enabled,omitempty"` + + ContextAwareness *DLPContextAwareness `json:"context_awareness,omitempty"` + + // The following fields are omitted for predefined DLP + // profiles. + Entries []DLPEntry `json:"entries,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// DLPProfilesCreateRequest represents a request to create a +// set of profiles. +type DLPProfilesCreateRequest struct { + Profiles []DLPProfile `json:"profiles"` +} + +// DLPProfileListResponse represents the response from the list +// dlp profiles endpoint. +type DLPProfileListResponse struct { + Result []DLPProfile `json:"result"` + Response +} + +// DLPProfileResponse is the API response, containing a single +// access application. +type DLPProfileResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result DLPProfile `json:"result"` +} + +type ListDLPProfilesParams struct{} + +type CreateDLPProfilesParams struct { + Profiles []DLPProfile `json:"profiles"` + Type string +} + +type UpdateDLPProfileParams struct { + ProfileID string + Profile DLPProfile + Type string +} + +// ListDLPProfiles returns all DLP profiles within an account. +// +// API reference: https://api.cloudflare.com/#dlp-profiles-list-all-profiles +func (api *API) ListDLPProfiles(ctx context.Context, rc *ResourceContainer, params ListDLPProfilesParams) ([]DLPProfile, error) { + if rc.Identifier == "" { + return []DLPProfile{}, ErrMissingResourceIdentifier + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/profiles", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DLPProfile{}, err + } + + var dlpProfilesListResponse DLPProfileListResponse + err = json.Unmarshal(res, &dlpProfilesListResponse) + if err != nil { + return []DLPProfile{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpProfilesListResponse.Result, nil +} + +// GetDLPProfile returns a single DLP profile (custom or predefined) based on +// the profile ID. +// +// API reference: https://api.cloudflare.com/#dlp-profiles-get-dlp-profile +func (api *API) GetDLPProfile(ctx context.Context, rc *ResourceContainer, profileID string) (DLPProfile, error) { + if rc.Identifier == "" { + return DLPProfile{}, ErrMissingResourceIdentifier + } + + if profileID == "" { + return DLPProfile{}, ErrMissingProfileID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/profiles/%s", rc.Level, rc.Identifier, profileID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DLPProfile{}, err + } + + var dlpProfileResponse DLPProfileResponse + err = json.Unmarshal(res, &dlpProfileResponse) + if err != nil { + return DLPProfile{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpProfileResponse.Result, nil +} + +// CreateDLPProfiles creates a set of DLP Profile. 
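+//
+// Illustrative usage sketch (assumed names: api, ctx, rc): the endpoint only
+// accepts custom profiles, so Type must be set to "custom".
+//
+//	enabled := true
+//	profiles, err := api.CreateDLPProfiles(ctx, rc, cloudflare.CreateDLPProfilesParams{
+//		Type: "custom",
+//		Profiles: []cloudflare.DLPProfile{{
+//			Name:              "internal-ids",
+//			Description:       "matches internal employee identifiers",
+//			AllowedMatchCount: 0,
+//			Entries: []cloudflare.DLPEntry{{
+//				Name:    "employee id",
+//				Enabled: &enabled,
+//				Pattern: &cloudflare.DLPPattern{Regex: `EMP-\d{6}`},
+//			}},
+//		}},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("created", len(profiles), "profiles")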
+// +// API reference: https://api.cloudflare.com/#dlp-profiles-create-custom-profiles +func (api *API) CreateDLPProfiles(ctx context.Context, rc *ResourceContainer, params CreateDLPProfilesParams) ([]DLPProfile, error) { + if rc.Identifier == "" { + return []DLPProfile{}, ErrMissingResourceIdentifier + } + + if params.Type == "" || params.Type != "custom" { + return []DLPProfile{}, fmt.Errorf("unsupported DLP profile type: %q", params.Type) + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/profiles/%s", rc.Level, rc.Identifier, params.Type), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return []DLPProfile{}, err + } + + var dLPCustomProfilesResponse DLPProfileListResponse + err = json.Unmarshal(res, &dLPCustomProfilesResponse) + if err != nil { + return []DLPProfile{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dLPCustomProfilesResponse.Result, nil +} + +// DeleteDLPProfile deletes a DLP profile. Only custom profiles can be deleted. +// +// API reference: https://api.cloudflare.com/#dlp-profiles-delete-custom-profile +func (api *API) DeleteDLPProfile(ctx context.Context, rc *ResourceContainer, profileID string) error { + if rc.Identifier == "" { + return ErrMissingResourceIdentifier + } + + if profileID == "" { + return ErrMissingProfileID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/profiles/custom/%s", rc.Level, rc.Identifier, profileID), nil) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + return err +} + +// UpdateDLPProfile updates a DLP profile. +// +// API reference: https://api.cloudflare.com/#dlp-profiles-update-custom-profile +// API reference: https://api.cloudflare.com/#dlp-profiles-update-predefined-profile +func (api *API) UpdateDLPProfile(ctx context.Context, rc *ResourceContainer, params UpdateDLPProfileParams) (DLPProfile, error) { + if rc.Identifier == "" { + return DLPProfile{}, ErrMissingResourceIdentifier + } + + if params.Type == "" { + params.Type = "custom" + } + + if params.ProfileID == "" { + return DLPProfile{}, ErrMissingProfileID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/profiles/%s/%s", rc.Level, rc.Identifier, params.Type, params.ProfileID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Profile) + if err != nil { + return DLPProfile{}, err + } + + var dlpProfileResponse DLPProfileResponse + err = json.Unmarshal(res, &dlpProfileResponse) + if err != nil { + return DLPProfile{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpProfileResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/dns.go b/vendor/github.com/cloudflare/cloudflare-go/dns.go new file mode 100644 index 0000000..0c36efd --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/dns.go @@ -0,0 +1,419 @@ +package cloudflare + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/http" + "regexp" + "strings" + "time" + + "github.com/goccy/go-json" + "golang.org/x/net/idna" +) + +// ErrMissingBINDContents is for when the BIND file contents is required but not set. +var ErrMissingBINDContents = errors.New("required BIND config contents missing") + +// DNSRecord represents a DNS record in a zone. 
+type DNSRecord struct { + CreatedOn time.Time `json:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Content string `json:"content,omitempty"` + Meta interface{} `json:"meta,omitempty"` + Data interface{} `json:"data,omitempty"` // data returned by: SRV, LOC + ID string `json:"id,omitempty"` + Priority *uint16 `json:"priority,omitempty"` + TTL int `json:"ttl,omitempty"` + Proxied *bool `json:"proxied,omitempty"` + Proxiable bool `json:"proxiable,omitempty"` + Comment string `json:"comment,omitempty"` // the server will omit the comment field when the comment is empty + Tags []string `json:"tags,omitempty"` +} + +// DNSRecordResponse represents the response from the DNS endpoint. +type DNSRecordResponse struct { + Result DNSRecord `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type ListDirection string + +const ( + ListDirectionAsc ListDirection = "asc" + ListDirectionDesc ListDirection = "desc" +) + +type ListDNSRecordsParams struct { + Type string `url:"type,omitempty"` + Name string `url:"name,omitempty"` + Content string `url:"content,omitempty"` + Proxied *bool `url:"proxied,omitempty"` + Comment string `url:"comment,omitempty"` // currently, the server does not support searching for records with an empty comment + Tags []string `url:"tag,omitempty"` // potentially multiple `tag=` + TagMatch string `url:"tag-match,omitempty"` + Order string `url:"order,omitempty"` + Direction ListDirection `url:"direction,omitempty"` + Match string `url:"match,omitempty"` + Priority *uint16 `url:"-"` + + ResultInfo +} + +type UpdateDNSRecordParams struct { + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Content string `json:"content,omitempty"` + Data interface{} `json:"data,omitempty"` // data for: SRV, LOC + ID string `json:"-"` + Priority *uint16 `json:"priority,omitempty"` + TTL int `json:"ttl,omitempty"` + Proxied *bool `json:"proxied,omitempty"` + Comment *string `json:"comment,omitempty"` // nil will keep the current comment, while StringPtr("") will empty it + Tags []string `json:"tags"` +} + +// DNSListResponse represents the response from the list DNS records endpoint. +type DNSListResponse struct { + Result []DNSRecord `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// listDNSRecordsDefaultPageSize represents the default per_page size of the API. +var listDNSRecordsDefaultPageSize int = 100 + +// nontransitionalLookup implements the nontransitional processing as specified in +// Unicode Technical Standard 46 with almost all checkings off to maximize user freedom. +var nontransitionalLookup = idna.New( + idna.MapForLookup(), + idna.StrictDomainName(false), + idna.ValidateLabels(false), +) + +// toUTS46ASCII tries to convert IDNs (international domain names) +// from Unicode form to Punycode, using non-transitional process specified +// in UTS 46. +// +// Note: conversion errors are silently discarded and partial conversion +// results are used. +func toUTS46ASCII(name string) string { + name, _ = nontransitionalLookup.ToASCII(name) + return name +} + +// proxiedRecordsRe is the regular expression for determining if a DNS record +// is proxied or not. +var proxiedRecordsRe = regexp.MustCompile(`(?m)^.*\.\s+1\s+IN\s+CNAME.*$`) + +// proxiedRecordImportTemplate is the multipart template for importing *only* +// proxied records. See `nonProxiedRecordImportTemplate` for importing records +// that are not proxied. 
+var proxiedRecordImportTemplate = `--------------------------BOUNDARY +Content-Disposition: form-data; name="file"; filename="bind.txt" + +%s +--------------------------BOUNDARY +Content-Disposition: form-data; name="proxied" + +true +--------------------------BOUNDARY--` + +// nonProxiedRecordImportTemplate is the multipart template for importing DNS +// records that are not proxed. For importing proxied records, use +// `proxiedRecordImportTemplate`. +var nonProxiedRecordImportTemplate = `--------------------------BOUNDARY +Content-Disposition: form-data; name="file"; filename="bind.txt" + +%s +--------------------------BOUNDARY--` + +// sanitiseBINDFileInput accepts the BIND file as a string and removes parts +// that are not required for importing or would break the import (like SOA +// records). +func sanitiseBINDFileInput(s string) string { + // Remove SOA records. + soaRe := regexp.MustCompile(`(?m)[\r\n]+^.*IN\s+SOA.*$`) + s = soaRe.ReplaceAllString(s, "") + + // Remove all comments. + commentRe := regexp.MustCompile(`(?m)[\r\n]+^.*;;.*$`) + s = commentRe.ReplaceAllString(s, "") + + // Swap all the tabs to spaces. + r := strings.NewReplacer( + "\t", " ", + "\n\n", "\n", + ) + s = r.Replace(s) + s = strings.TrimSpace(s) + + return s +} + +// extractProxiedRecords accepts a BIND file (as a string) and returns only the +// proxied DNS records. +func extractProxiedRecords(s string) string { + proxiedOnlyRecords := proxiedRecordsRe.FindAllString(s, -1) + return strings.Join(proxiedOnlyRecords, "\n") +} + +// removeProxiedRecords accepts a BIND file (as a string) and returns the file +// contents without any proxied records included. +func removeProxiedRecords(s string) string { + return proxiedRecordsRe.ReplaceAllString(s, "") +} + +type ExportDNSRecordsParams struct{} +type ImportDNSRecordsParams struct { + BINDContents string +} + +type CreateDNSRecordParams struct { + CreatedOn time.Time `json:"created_on,omitempty" url:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty" url:"modified_on,omitempty"` + Type string `json:"type,omitempty" url:"type,omitempty"` + Name string `json:"name,omitempty" url:"name,omitempty"` + Content string `json:"content,omitempty" url:"content,omitempty"` + Meta interface{} `json:"meta,omitempty"` + Data interface{} `json:"data,omitempty"` // data returned by: SRV, LOC + ID string `json:"id,omitempty"` + Priority *uint16 `json:"priority,omitempty"` + TTL int `json:"ttl,omitempty"` + Proxied *bool `json:"proxied,omitempty" url:"proxied,omitempty"` + Proxiable bool `json:"proxiable,omitempty"` + Comment string `json:"comment,omitempty" url:"comment,omitempty"` // to the server, there's no difference between "no comment" and "empty comment" + Tags []string `json:"tags,omitempty"` +} + +// CreateDNSRecord creates a DNS record for the zone identifier. 
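+//
+// Illustrative usage sketch (assumed names: api, ctx, zoneID): creating a
+// proxied A record; a TTL of 1 means "automatic" for proxied records.
+//
+//	proxied := true
+//	record, err := api.CreateDNSRecord(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.CreateDNSRecordParams{
+//			Type:    "A",
+//			Name:    "www.example.com",
+//			Content: "203.0.113.10",
+//			TTL:     1,
+//			Proxied: &proxied,
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("created record", record.ID)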
+// +// API reference: https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record +func (api *API) CreateDNSRecord(ctx context.Context, rc *ResourceContainer, params CreateDNSRecordParams) (DNSRecord, error) { + if rc.Identifier == "" { + return DNSRecord{}, ErrMissingZoneID + } + params.Name = toUTS46ASCII(params.Name) + + uri := fmt.Sprintf("/zones/%s/dns_records", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return DNSRecord{}, err + } + + var recordResp *DNSRecordResponse + err = json.Unmarshal(res, &recordResp) + if err != nil { + return DNSRecord{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return recordResp.Result, nil +} + +// ListDNSRecords returns a slice of DNS records for the given zone identifier. +// +// API reference: https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records +func (api *API) ListDNSRecords(ctx context.Context, rc *ResourceContainer, params ListDNSRecordsParams) ([]DNSRecord, *ResultInfo, error) { + if rc.Identifier == "" { + return nil, nil, ErrMissingZoneID + } + + params.Name = toUTS46ASCII(params.Name) + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = listDNSRecordsDefaultPageSize + } + + if params.Page < 1 { + params.Page = 1 + } + + var records []DNSRecord + var lastResultInfo ResultInfo + + for { + uri := buildURI(fmt.Sprintf("/zones/%s/dns_records", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DNSRecord{}, &ResultInfo{}, err + } + var listResponse DNSListResponse + err = json.Unmarshal(res, &listResponse) + if err != nil { + return []DNSRecord{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + records = append(records, listResponse.Result...) + lastResultInfo = listResponse.ResultInfo + params.ResultInfo = listResponse.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + return records, &lastResultInfo, nil +} + +// ErrMissingDNSRecordID is for when DNS record ID is needed but not given. +var ErrMissingDNSRecordID = errors.New("required DNS record ID missing") + +// GetDNSRecord returns a single DNS record for the given zone & record +// identifiers. +// +// API reference: https://api.cloudflare.com/#dns-records-for-a-zone-dns-record-details +func (api *API) GetDNSRecord(ctx context.Context, rc *ResourceContainer, recordID string) (DNSRecord, error) { + if rc.Identifier == "" { + return DNSRecord{}, ErrMissingZoneID + } + if recordID == "" { + return DNSRecord{}, ErrMissingDNSRecordID + } + + uri := fmt.Sprintf("/zones/%s/dns_records/%s", rc.Identifier, recordID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DNSRecord{}, err + } + var r DNSRecordResponse + err = json.Unmarshal(res, &r) + if err != nil { + return DNSRecord{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateDNSRecord updates a single DNS record for the given zone & record +// identifiers. 
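+//
+// Illustrative usage sketch (assumed names: api, ctx, zoneID, recordID): note
+// the pointer semantics of Comment described on UpdateDNSRecordParams; a
+// pointer to "" clears the comment, while nil leaves it untouched.
+//
+//	emptyComment := ""
+//	updated, err := api.UpdateDNSRecord(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.UpdateDNSRecordParams{
+//			ID:      recordID,
+//			Content: "203.0.113.20",
+//			Comment: &emptyComment,
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = updated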
+// +// API reference: https://api.cloudflare.com/#dns-records-for-a-zone-update-dns-record +func (api *API) UpdateDNSRecord(ctx context.Context, rc *ResourceContainer, params UpdateDNSRecordParams) (DNSRecord, error) { + if rc.Identifier == "" { + return DNSRecord{}, ErrMissingZoneID + } + + if params.ID == "" { + return DNSRecord{}, ErrMissingDNSRecordID + } + + params.Name = toUTS46ASCII(params.Name) + + uri := fmt.Sprintf("/zones/%s/dns_records/%s", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return DNSRecord{}, err + } + + var recordResp *DNSRecordResponse + err = json.Unmarshal(res, &recordResp) + if err != nil { + return DNSRecord{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return recordResp.Result, nil +} + +// DeleteDNSRecord deletes a single DNS record for the given zone & record +// identifiers. +// +// API reference: https://api.cloudflare.com/#dns-records-for-a-zone-delete-dns-record +func (api *API) DeleteDNSRecord(ctx context.Context, rc *ResourceContainer, recordID string) error { + if rc.Identifier == "" { + return ErrMissingZoneID + } + if recordID == "" { + return ErrMissingDNSRecordID + } + + uri := fmt.Sprintf("/zones/%s/dns_records/%s", rc.Identifier, recordID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r DNSRecordResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// ExportDNSRecords returns all DNS records for a zone in the BIND format. +// +// API reference: https://developers.cloudflare.com/api/operations/dns-records-for-a-zone-export-dns-records +func (api *API) ExportDNSRecords(ctx context.Context, rc *ResourceContainer, params ExportDNSRecordsParams) (string, error) { + if rc.Level != ZoneRouteLevel { + return "", ErrRequiredZoneLevelResourceContainer + } + + if rc.Identifier == "" { + return "", ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/dns_records/export", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return "", err + } + + return string(res), nil +} + +// ImportDNSRecords takes the contents of a BIND configuration file and imports +// all records at once. +// +// The current state of the API doesn't allow the proxying field to be +// automatically set on records where the TTL is 1. Instead you need to +// explicitly tell the endpoint which records are proxied in the form data. To +// achieve a simpler abstraction, we do the legwork in the method of making the +// two separate API calls (one for proxied and one for non-proxied) instead of +// making the end user know about this detail. 
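+//
+// Illustrative usage sketch (assumed names: api, ctx, zoneID and the zone file
+// path): importing a BIND zone file from disk; proxied and non-proxied records
+// are split into the two uploads described above.
+//
+//	contents, err := os.ReadFile("example.com.zone")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	err = api.ImportDNSRecords(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.ImportDNSRecordsParams{BINDContents: string(contents)})
+//	if err != nil {
+//		log.Fatal(err)
+//	}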
+// +// API reference: https://developers.cloudflare.com/api/operations/dns-records-for-a-zone-import-dns-records +func (api *API) ImportDNSRecords(ctx context.Context, rc *ResourceContainer, params ImportDNSRecordsParams) error { + if rc.Level != ZoneRouteLevel { + return ErrRequiredZoneLevelResourceContainer + } + + if rc.Identifier == "" { + return ErrMissingZoneID + } + + if params.BINDContents == "" { + return ErrMissingBINDContents + } + + sanitisedBindData := sanitiseBINDFileInput(params.BINDContents) + nonProxiedRecords := removeProxiedRecords(sanitisedBindData) + proxiedOnlyRecords := extractProxiedRecords(sanitisedBindData) + + nonProxiedRecordPayload := []byte(fmt.Sprintf(nonProxiedRecordImportTemplate, nonProxiedRecords)) + nonProxiedReqBody := bytes.NewReader(nonProxiedRecordPayload) + + uri := fmt.Sprintf("/zones/%s/dns_records/import", rc.Identifier) + multipartUploadHeaders := http.Header{ + "Content-Type": {"multipart/form-data; boundary=------------------------BOUNDARY"}, + } + + _, err := api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, nonProxiedReqBody, multipartUploadHeaders) + if err != nil { + return err + } + + proxiedRecordPayload := []byte(fmt.Sprintf(proxiedRecordImportTemplate, proxiedOnlyRecords)) + proxiedReqBody := bytes.NewReader(proxiedRecordPayload) + + _, err = api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, proxiedReqBody, multipartUploadHeaders) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/dns_firewall.go b/vendor/github.com/cloudflare/cloudflare-go/dns_firewall.go new file mode 100644 index 0000000..090b24c --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/dns_firewall.go @@ -0,0 +1,220 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ErrMissingClusterID = errors.New("missing required cluster ID") + +// DNSFirewallCluster represents a DNS Firewall configuration. +type DNSFirewallCluster struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + UpstreamIPs []string `json:"upstream_ips"` + DNSFirewallIPs []string `json:"dns_firewall_ips,omitempty"` + MinimumCacheTTL uint `json:"minimum_cache_ttl,omitempty"` + MaximumCacheTTL uint `json:"maximum_cache_ttl,omitempty"` + DeprecateAnyRequests bool `json:"deprecate_any_requests"` + ModifiedOn string `json:"modified_on,omitempty"` +} + +// DNSFirewallAnalyticsMetrics represents a group of aggregated DNS Firewall metrics. +type DNSFirewallAnalyticsMetrics struct { + QueryCount *int64 `json:"queryCount"` + UncachedCount *int64 `json:"uncachedCount"` + StaleCount *int64 `json:"staleCount"` + ResponseTimeAvg *float64 `json:"responseTimeAvg"` + ResponseTimeMedian *float64 `json:"responseTimeMedian"` + ResponseTime90th *float64 `json:"responseTime90th"` + ResponseTime99th *float64 `json:"responseTime99th"` +} + +// DNSFirewallAnalytics represents a set of aggregated DNS Firewall metrics. +// TODO: Add the queried data and not only the aggregated values. +type DNSFirewallAnalytics struct { + Totals DNSFirewallAnalyticsMetrics `json:"totals"` + Min DNSFirewallAnalyticsMetrics `json:"min"` + Max DNSFirewallAnalyticsMetrics `json:"max"` +} + +// DNSFirewallUserAnalyticsOptions represents range and dimension selection on analytics endpoint. 
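+//
+// Illustrative usage sketch (assumed names: api, ctx, accountID, clusterID):
+// requesting a 24-hour report for selected metrics; the metric names mirror
+// the JSON fields of DNSFirewallAnalyticsMetrics.
+//
+//	since := time.Now().Add(-24 * time.Hour)
+//	until := time.Now()
+//	report, err := api.GetDNSFirewallUserAnalytics(ctx, cloudflare.AccountIdentifier(accountID),
+//		cloudflare.GetDNSFirewallUserAnalyticsParams{
+//			ClusterID: clusterID,
+//			DNSFirewallUserAnalyticsOptions: cloudflare.DNSFirewallUserAnalyticsOptions{
+//				Metrics: []string{"queryCount", "uncachedCount"},
+//				Since:   &since,
+//				Until:   &until,
+//			},
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("total queries: %d\n", *report.Totals.QueryCount)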
+type DNSFirewallUserAnalyticsOptions struct { + Metrics []string `url:"metrics,omitempty" del:","` + Since *time.Time `url:"since,omitempty"` + Until *time.Time `url:"until,omitempty"` +} + +// dnsFirewallResponse represents a DNS Firewall response. +type dnsFirewallResponse struct { + Response + Result *DNSFirewallCluster `json:"result"` +} + +// dnsFirewallListResponse represents an array of DNS Firewall responses. +type dnsFirewallListResponse struct { + Response + Result []*DNSFirewallCluster `json:"result"` +} + +// dnsFirewallAnalyticsResponse represents a DNS Firewall analytics response. +type dnsFirewallAnalyticsResponse struct { + Response + Result DNSFirewallAnalytics `json:"result"` +} + +type CreateDNSFirewallClusterParams struct { + Name string `json:"name"` + UpstreamIPs []string `json:"upstream_ips"` + DNSFirewallIPs []string `json:"dns_firewall_ips,omitempty"` + MinimumCacheTTL uint `json:"minimum_cache_ttl,omitempty"` + MaximumCacheTTL uint `json:"maximum_cache_ttl,omitempty"` + DeprecateAnyRequests bool `json:"deprecate_any_requests"` +} + +type GetDNSFirewallClusterParams struct { + ClusterID string `json:"-"` +} + +type UpdateDNSFirewallClusterParams struct { + ClusterID string `json:"-"` + Name string `json:"name"` + UpstreamIPs []string `json:"upstream_ips"` + DNSFirewallIPs []string `json:"dns_firewall_ips,omitempty"` + MinimumCacheTTL uint `json:"minimum_cache_ttl,omitempty"` + MaximumCacheTTL uint `json:"maximum_cache_ttl,omitempty"` + DeprecateAnyRequests bool `json:"deprecate_any_requests"` +} + +type ListDNSFirewallClustersParams struct{} + +type GetDNSFirewallUserAnalyticsParams struct { + ClusterID string `json:"-"` + DNSFirewallUserAnalyticsOptions +} + +// CreateDNSFirewallCluster creates a new DNS Firewall cluster. +// +// API reference: https://api.cloudflare.com/#dns-firewall-create-dns-firewall-cluster +func (api *API) CreateDNSFirewallCluster(ctx context.Context, rc *ResourceContainer, params CreateDNSFirewallClusterParams) (*DNSFirewallCluster, error) { + uri := fmt.Sprintf("/%s/dns_firewall", rc.URLFragment()) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return nil, err + } + + response := &dnsFirewallResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// GetDNSFirewallCluster fetches a single DNS Firewall cluster. +// +// API reference: https://api.cloudflare.com/#dns-firewall-dns-firewall-cluster-details +func (api *API) GetDNSFirewallCluster(ctx context.Context, rc *ResourceContainer, params GetDNSFirewallClusterParams) (*DNSFirewallCluster, error) { + if params.ClusterID == "" { + return &DNSFirewallCluster{}, ErrMissingClusterID + } + + uri := fmt.Sprintf("/%s/dns_firewall/%s", rc.URLFragment(), params.ClusterID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + response := &dnsFirewallResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// ListDNSFirewallClusters lists the DNS Firewall clusters associated with an account. 
+// +// API reference: https://api.cloudflare.com/#dns-firewall-list-dns-firewall-clusters +func (api *API) ListDNSFirewallClusters(ctx context.Context, rc *ResourceContainer, params ListDNSFirewallClustersParams) ([]*DNSFirewallCluster, error) { + uri := fmt.Sprintf("/%s/dns_firewall", rc.URLFragment()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + response := &dnsFirewallListResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// UpdateDNSFirewallCluster updates a DNS Firewall cluster. +// +// API reference: https://api.cloudflare.com/#dns-firewall-update-dns-firewall-cluster +func (api *API) UpdateDNSFirewallCluster(ctx context.Context, rc *ResourceContainer, params UpdateDNSFirewallClusterParams) error { + if params.ClusterID == "" { + return ErrMissingClusterID + } + + uri := fmt.Sprintf("/%s/dns_firewall/%s", rc.URLFragment(), params.ClusterID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return err + } + + response := &dnsFirewallResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// DeleteDNSFirewallCluster deletes a DNS Firewall cluster. Note that this cannot be +// undone, and will stop all traffic to that cluster. +// +// API reference: https://api.cloudflare.com/#dns-firewall-delete-dns-firewall-cluster +func (api *API) DeleteDNSFirewallCluster(ctx context.Context, rc *ResourceContainer, clusterID string) error { + uri := fmt.Sprintf("/%s/dns_firewall/%s", rc.URLFragment(), clusterID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + response := &dnsFirewallResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// GetDNSFirewallUserAnalytics retrieves analytics report for a specified dimension and time range. +func (api *API) GetDNSFirewallUserAnalytics(ctx context.Context, rc *ResourceContainer, params GetDNSFirewallUserAnalyticsParams) (DNSFirewallAnalytics, error) { + uri := buildURI(fmt.Sprintf("/%s/dns_firewall/%s/dns_analytics/report", rc.URLFragment(), params.ClusterID), params.DNSFirewallUserAnalyticsOptions) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DNSFirewallAnalytics{}, err + } + + response := dnsFirewallAnalyticsResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return DNSFirewallAnalytics{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/duration.go b/vendor/github.com/cloudflare/cloudflare-go/duration.go new file mode 100644 index 0000000..2e39d5e --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/duration.go @@ -0,0 +1,41 @@ +package cloudflare + +import ( + "time" + + "github.com/goccy/go-json" +) + +// Duration implements json.Marshaler and json.Unmarshaler for time.Duration +// using the fmt.Stringer interface of time.Duration and time.ParseDuration. +type Duration struct { + time.Duration +} + +// MarshalJSON encodes a Duration as a JSON string formatted using String. 
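+//
+// Illustrative round-trip sketch with encoding/json (any encoder that honours
+// json.Marshaler and json.Unmarshaler behaves the same way):
+//
+//	b, _ := json.Marshal(cloudflare.Duration{Duration: 90 * time.Second})
+//	fmt.Println(string(b)) // "1m30s"
+//
+//	var d cloudflare.Duration
+//	_ = json.Unmarshal([]byte(`"250ms"`), &d)
+//	fmt.Println(d.Duration) // 250ms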
+func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} + +// UnmarshalJSON decodes a Duration from a JSON string parsed using time.ParseDuration. +func (d *Duration) UnmarshalJSON(buf []byte) error { + var str string + + err := json.Unmarshal(buf, &str) + if err != nil { + return err + } + + dur, err := time.ParseDuration(str) + if err != nil { + return err + } + + d.Duration = dur + return nil +} + +var ( + _ = json.Marshaler((*Duration)(nil)) + _ = json.Unmarshaler((*Duration)(nil)) +) diff --git a/vendor/github.com/cloudflare/cloudflare-go/email_routing_destination.go b/vendor/github.com/cloudflare/cloudflare-go/email_routing_destination.go new file mode 100644 index 0000000..3ad0284 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/email_routing_destination.go @@ -0,0 +1,156 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type EmailRoutingDestinationAddress struct { + Tag string `json:"tag,omitempty"` + Email string `json:"email,omitempty"` + Verified *time.Time `json:"verified,omitempty"` + Created *time.Time `json:"created,omitempty"` + Modified *time.Time `json:"modified,omitempty"` +} + +type ListEmailRoutingAddressParameters struct { + ResultInfo + Direction string `url:"direction,omitempty"` + Verified *bool `url:"verified,omitempty"` +} + +type ListEmailRoutingAddressResponse struct { + Result []EmailRoutingDestinationAddress `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +type CreateEmailRoutingAddressParameters struct { + Email string `json:"email,omitempty"` +} + +type CreateEmailRoutingAddressResponse struct { + Result EmailRoutingDestinationAddress `json:"result,omitempty"` + Response +} + +// ListEmailRoutingDestinationAddresses Lists existing destination addresses. +// +// API reference: https://api.cloudflare.com/#email-routing-destination-addresses-list-destination-addresses +func (api *API) ListEmailRoutingDestinationAddresses(ctx context.Context, rc *ResourceContainer, params ListEmailRoutingAddressParameters) ([]EmailRoutingDestinationAddress, *ResultInfo, error) { + if rc.Identifier == "" { + return []EmailRoutingDestinationAddress{}, &ResultInfo{}, ErrMissingAccountID + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 50 + } + + if params.Page < 1 { + params.Page = 1 + } + + var addresses []EmailRoutingDestinationAddress + var eResponse ListEmailRoutingAddressResponse + for { + eResponse = ListEmailRoutingAddressResponse{} + uri := buildURI(fmt.Sprintf("/accounts/%s/email/routing/addresses", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []EmailRoutingDestinationAddress{}, &ResultInfo{}, err + } + err = json.Unmarshal(res, &eResponse) + if err != nil { + return []EmailRoutingDestinationAddress{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + addresses = append(addresses, eResponse.Result...) + params.ResultInfo = eResponse.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return addresses, &eResponse.ResultInfo, nil +} + +// CreateEmailRoutingDestinationAddress Create a destination address to forward your emails to. +// Destination addresses need to be verified before they become active. 
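+//
+// Illustrative usage sketch (assumed names: api, ctx, accountID): the Verified
+// timestamp stays nil until the owner of the mailbox confirms the
+// verification e-mail.
+//
+//	addr, err := api.CreateEmailRoutingDestinationAddress(ctx,
+//		cloudflare.AccountIdentifier(accountID),
+//		cloudflare.CreateEmailRoutingAddressParameters{Email: "ops@example.com"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("created address", addr.Tag, "verified:", addr.Verified != nil)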
+// +// API reference: https://api.cloudflare.com/#email-routing-destination-addresses-create-a-destination-address +func (api *API) CreateEmailRoutingDestinationAddress(ctx context.Context, rc *ResourceContainer, params CreateEmailRoutingAddressParameters) (EmailRoutingDestinationAddress, error) { + if rc.Identifier == "" { + return EmailRoutingDestinationAddress{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/email/routing/addresses", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return EmailRoutingDestinationAddress{}, err + } + + var r CreateEmailRoutingAddressResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingDestinationAddress{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// GetEmailRoutingDestinationAddress Gets information for a specific destination email already created. +// +// API reference: https://api.cloudflare.com/#email-routing-destination-addresses-get-a-destination-address +func (api *API) GetEmailRoutingDestinationAddress(ctx context.Context, rc *ResourceContainer, addressID string) (EmailRoutingDestinationAddress, error) { + if rc.Identifier == "" { + return EmailRoutingDestinationAddress{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/email/routing/addresses/%s", rc.Identifier, addressID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return EmailRoutingDestinationAddress{}, err + } + + var r CreateEmailRoutingAddressResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingDestinationAddress{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteEmailRoutingDestinationAddress Deletes a specific destination address. 
+// +// API reference: https://api.cloudflare.com/#email-routing-destination-addresses-delete-destination-address +func (api *API) DeleteEmailRoutingDestinationAddress(ctx context.Context, rc *ResourceContainer, addressID string) (EmailRoutingDestinationAddress, error) { + if rc.Identifier == "" { + return EmailRoutingDestinationAddress{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/email/routing/addresses/%s", rc.Identifier, addressID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return EmailRoutingDestinationAddress{}, err + } + + var r CreateEmailRoutingAddressResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingDestinationAddress{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/email_routing_rules.go b/vendor/github.com/cloudflare/cloudflare-go/email_routing_rules.go new file mode 100644 index 0000000..736200d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/email_routing_rules.go @@ -0,0 +1,274 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +var ErrMissingRuleID = errors.New("required rule id missing") + +type EmailRoutingRuleMatcher struct { + Type string `json:"type,omitempty"` + Field string `json:"field,omitempty"` + Value string `json:"value,omitempty"` +} + +type EmailRoutingRuleAction struct { + Type string `json:"type,omitempty"` + Value []string `json:"value,omitempty"` +} + +type EmailRoutingRule struct { + Tag string `json:"tag,omitempty"` + Name string `json:"name,omitempty"` + Priority int `json:"priority,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Matchers []EmailRoutingRuleMatcher `json:"matchers,omitempty"` + Actions []EmailRoutingRuleAction `json:"actions,omitempty"` +} + +type ListEmailRoutingRulesParameters struct { + Enabled *bool `url:"enabled,omitempty"` + ResultInfo +} + +type ListEmailRoutingRuleResponse struct { + Result []EmailRoutingRule `json:"result"` + ResultInfo `json:"result_info,omitempty"` + Response +} + +type CreateEmailRoutingRuleParameters struct { + Matchers []EmailRoutingRuleMatcher `json:"matchers,omitempty"` + Actions []EmailRoutingRuleAction `json:"actions,omitempty"` + Name string `json:"name,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Priority int `json:"priority,omitempty"` +} + +type CreateEmailRoutingRuleResponse struct { + Result EmailRoutingRule `json:"result"` + Response +} + +type GetEmailRoutingRuleResponse struct { + Result EmailRoutingRule `json:"result"` + Response +} + +type UpdateEmailRoutingRuleParameters struct { + Matchers []EmailRoutingRuleMatcher `json:"matchers,omitempty"` + Actions []EmailRoutingRuleAction `json:"actions,omitempty"` + Name string `json:"name,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Priority int `json:"priority,omitempty"` + RuleID string +} + +type EmailRoutingCatchAllRule struct { + Tag string `json:"tag,omitempty"` + Name string `json:"name,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Matchers []EmailRoutingRuleMatcher `json:"matchers,omitempty"` + Actions []EmailRoutingRuleAction `json:"actions,omitempty"` +} + +type EmailRoutingCatchAllRuleResponse struct { + Result EmailRoutingCatchAllRule `json:"result"` + Response +} + +// ListEmailRoutingRules Lists existing routing rules. 
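+//
+// Illustrative usage sketch (assumed names: api, ctx, zoneID): listing only
+// the enabled rules of a zone; pagination is handled automatically unless
+// Page or PerPage is set.
+//
+//	enabled := true
+//	rules, _, err := api.ListEmailRoutingRules(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.ListEmailRoutingRulesParameters{Enabled: &enabled})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, rule := range rules {
+//		fmt.Println(rule.Tag, rule.Name)
+//	}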
+// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-list-routing-rules +func (api *API) ListEmailRoutingRules(ctx context.Context, rc *ResourceContainer, params ListEmailRoutingRulesParameters) ([]EmailRoutingRule, *ResultInfo, error) { + if rc.Identifier == "" { + return []EmailRoutingRule{}, &ResultInfo{}, ErrMissingZoneID + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = 50 + } + if params.Page < 1 { + params.Page = 1 + } + + var rules []EmailRoutingRule + var rResponse ListEmailRoutingRuleResponse + for { + rResponse = ListEmailRoutingRuleResponse{} + uri := buildURI(fmt.Sprintf("/zones/%s/email/routing/rules", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []EmailRoutingRule{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &rResponse) + if err != nil { + return []EmailRoutingRule{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + rules = append(rules, rResponse.Result...) + params.ResultInfo = rResponse.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return rules, &rResponse.ResultInfo, nil +} + +// CreateEmailRoutingRule Rules consist of a set of criteria for matching emails (such as an email being sent to a specific custom email address) plus a set of actions to take on the email (like forwarding it to a specific destination address). +// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-create-routing-rule +func (api *API) CreateEmailRoutingRule(ctx context.Context, rc *ResourceContainer, params CreateEmailRoutingRuleParameters) (EmailRoutingRule, error) { + if rc.Identifier == "" { + return EmailRoutingRule{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/email/routing/rules", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return EmailRoutingRule{}, err + } + + var r CreateEmailRoutingRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// GetEmailRoutingRule Get information for a specific routing rule already created. 
+// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-get-routing-rule +func (api *API) GetEmailRoutingRule(ctx context.Context, rc *ResourceContainer, ruleID string) (EmailRoutingRule, error) { + if rc.Identifier == "" { + return EmailRoutingRule{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/email/routing/rules/%s", rc.Identifier, ruleID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return EmailRoutingRule{}, err + } + + var r GetEmailRoutingRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateEmailRoutingRule Update actions, matches, or enable/disable specific routing rules +// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-update-routing-rule +func (api *API) UpdateEmailRoutingRule(ctx context.Context, rc *ResourceContainer, params UpdateEmailRoutingRuleParameters) (EmailRoutingRule, error) { + if rc.Identifier == "" { + return EmailRoutingRule{}, ErrMissingZoneID + } + + if params.RuleID == "" { + return EmailRoutingRule{}, ErrMissingRuleID + } + + uri := fmt.Sprintf("/zones/%s/email/routing/rules/%s", rc.Identifier, params.RuleID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return EmailRoutingRule{}, err + } + + var r GetEmailRoutingRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteEmailRoutingRule Delete a specific routing rule. +// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-delete-routing-rule +func (api *API) DeleteEmailRoutingRule(ctx context.Context, rc *ResourceContainer, ruleID string) (EmailRoutingRule, error) { + if rc.Identifier == "" { + return EmailRoutingRule{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/email/routing/rules/%s", rc.Identifier, ruleID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return EmailRoutingRule{}, err + } + + var r GetEmailRoutingRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// GetEmailRoutingCatchAllRule Get information on the default catch-all routing rule. +// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-get-catch-all-rule +func (api *API) GetEmailRoutingCatchAllRule(ctx context.Context, rc *ResourceContainer) (EmailRoutingCatchAllRule, error) { + if rc.Identifier == "" { + return EmailRoutingCatchAllRule{}, ErrMissingZoneID + } + uri := fmt.Sprintf("/zones/%s/email/routing/rules/catch_all", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return EmailRoutingCatchAllRule{}, err + } + + var r EmailRoutingCatchAllRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingCatchAllRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateEmailRoutingCatchAllRule Enable or disable catch-all routing rule, or change action to forward to specific destination address. 
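+//
+// Illustrative usage sketch (assumed names: api, ctx, zoneID; the "all"
+// matcher and "forward" action values follow the public Email Routing API
+// documentation): forwarding every otherwise unmatched message.
+//
+//	enabled := true
+//	_, err := api.UpdateEmailRoutingCatchAllRule(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.EmailRoutingCatchAllRule{
+//			Name:     "catch-all",
+//			Enabled:  &enabled,
+//			Matchers: []cloudflare.EmailRoutingRuleMatcher{{Type: "all"}},
+//			Actions: []cloudflare.EmailRoutingRuleAction{
+//				{Type: "forward", Value: []string{"ops@example.com"}},
+//			},
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}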
+// +// API reference: https://api.cloudflare.com/#email-routing-routing-rules-update-catch-all-rule +func (api *API) UpdateEmailRoutingCatchAllRule(ctx context.Context, rc *ResourceContainer, params EmailRoutingCatchAllRule) (EmailRoutingCatchAllRule, error) { + if rc.Identifier == "" { + return EmailRoutingCatchAllRule{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/email/routing/rules/catch_all", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return EmailRoutingCatchAllRule{}, err + } + + var r EmailRoutingCatchAllRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingCatchAllRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/email_routing_settings.go b/vendor/github.com/cloudflare/cloudflare-go/email_routing_settings.go new file mode 100644 index 0000000..20fc3ea --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/email_routing_settings.go @@ -0,0 +1,118 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type EmailRoutingSettings struct { + Tag string `json:"tag,omitempty"` + Name string `json:"name,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Created *time.Time `json:"created,omitempty"` + Modified *time.Time `json:"modified,omitempty"` + SkipWizard *bool `json:"skip_wizard,omitempty"` + Status string `json:"status,omitempty"` +} + +type EmailRoutingSettingsResponse struct { + Result EmailRoutingSettings `json:"result,omitempty"` + Response +} + +type EmailRoutingDNSSettingsResponse struct { + Result []DNSRecord `json:"result,omitempty"` + Response +} + +// GetEmailRoutingSettings Get information about the settings for your Email Routing zone. +// +// API reference: https://api.cloudflare.com/#email-routing-settings-get-email-routing-settings +func (api *API) GetEmailRoutingSettings(ctx context.Context, rc *ResourceContainer) (EmailRoutingSettings, error) { + if rc.Identifier == "" { + return EmailRoutingSettings{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/email/routing", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return EmailRoutingSettings{}, err + } + + var r EmailRoutingSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// EnableEmailRouting Enable you Email Routing zone. Add and lock the necessary MX and SPF records. +// +// API reference: https://api.cloudflare.com/#email-routing-settings-enable-email-routing +func (api *API) EnableEmailRouting(ctx context.Context, rc *ResourceContainer) (EmailRoutingSettings, error) { + if rc.Identifier == "" { + return EmailRoutingSettings{}, ErrMissingZoneID + } + uri := fmt.Sprintf("/zones/%s/email/routing/enable", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return EmailRoutingSettings{}, err + } + + var r EmailRoutingSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DisableEmailRouting Disable your Email Routing zone. Also removes additional MX records previously required for Email Routing to work. 
+// +// API reference: https://api.cloudflare.com/#email-routing-settings-disable-email-routing +func (api *API) DisableEmailRouting(ctx context.Context, rc *ResourceContainer) (EmailRoutingSettings, error) { + if rc.Identifier == "" { + return EmailRoutingSettings{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/email/routing/disable", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return EmailRoutingSettings{}, err + } + + var r EmailRoutingSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return EmailRoutingSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetEmailRoutingDNSSettings Show the DNS records needed to configure your Email Routing zone. +// +// API reference: https://api.cloudflare.com/#email-routing-settings-email-routing---dns-settings +func (api *API) GetEmailRoutingDNSSettings(ctx context.Context, rc *ResourceContainer) ([]DNSRecord, error) { + if rc.Identifier == "" { + return []DNSRecord{}, ErrMissingZoneID + } + uri := fmt.Sprintf("/zones/%s/email/routing/dns", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DNSRecord{}, err + } + + var r EmailRoutingDNSSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []DNSRecord{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/errors.go b/vendor/github.com/cloudflare/cloudflare-go/errors.go new file mode 100644 index 0000000..93faab0 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/errors.go @@ -0,0 +1,415 @@ +package cloudflare + +import ( + "errors" + "fmt" + "net/http" + "strings" +) + +const ( + errEmptyCredentials = "invalid credentials: key & email must not be empty" //nolint:gosec,unused + errEmptyAPIToken = "invalid credentials: API Token must not be empty" //nolint:gosec,unused + errInternalServiceError = "internal service error" + errMakeRequestError = "error from makeRequest" + errUnmarshalError = "error unmarshalling the JSON response" + errUnmarshalErrorBody = "error unmarshalling the JSON response error body" + errRequestNotSuccessful = "error reported by API" + errMissingAccountID = "required missing account ID" + errMissingZoneID = "required missing zone ID" + errMissingAccountOrZoneID = "either account ID or zone ID must be provided" + errAccountIDAndZoneIDAreMutuallyExclusive = "account ID and zone ID are mutually exclusive" + errMissingResourceIdentifier = "required missing resource identifier" + errOperationStillRunning = "bulk operation did not finish before timeout" + errOperationUnexpectedStatus = "bulk operation returned an unexpected status" + errResultInfo = "incorrect pagination info (result_info) in responses" + errManualPagination = "unexpected pagination options passed to functions that handle pagination automatically" + errInvalidResourceIdentifer = "invalid resource identifier: %s" + errInvalidZoneIdentifer = "invalid zone identifier: %s" + errAPIKeysAndTokensAreMutuallyExclusive = "API keys and tokens are mutually exclusive" //nolint:gosec + errMissingCredentials = "no credentials provided" + + errInvalidResourceContainerAccess = "requested resource container (%q) is not supported for this endpoint" + errRequiredAccountLevelResourceContainer = "this endpoint requires using an account level resource container and identifiers" + errRequiredZoneLevelResourceContainer = "this endpoint requires 
using a zone level resource container and identifiers" +) + +var ( + ErrAPIKeysAndTokensAreMutuallyExclusive = errors.New(errAPIKeysAndTokensAreMutuallyExclusive) + ErrMissingCredentials = errors.New(errMissingCredentials) + ErrMissingAccountID = errors.New(errMissingAccountID) + ErrMissingZoneID = errors.New(errMissingZoneID) + ErrAccountIDOrZoneIDAreRequired = errors.New(errMissingAccountOrZoneID) + ErrAccountIDAndZoneIDAreMutuallyExclusive = errors.New(errAccountIDAndZoneIDAreMutuallyExclusive) + ErrMissingResourceIdentifier = errors.New(errMissingResourceIdentifier) + + ErrRequiredAccountLevelResourceContainer = errors.New(errRequiredAccountLevelResourceContainer) + ErrRequiredZoneLevelResourceContainer = errors.New(errRequiredZoneLevelResourceContainer) +) + +type ErrorType string + +const ( + ErrorTypeRequest ErrorType = "request" + ErrorTypeAuthentication ErrorType = "authentication" + ErrorTypeAuthorization ErrorType = "authorization" + ErrorTypeNotFound ErrorType = "not_found" + ErrorTypeRateLimit ErrorType = "rate_limit" + ErrorTypeService ErrorType = "service" +) + +type Error struct { + // The classification of error encountered. + Type ErrorType + + // StatusCode is the HTTP status code from the response. + StatusCode int + + // Errors is all of the error messages and codes, combined. + Errors []ResponseInfo + + // ErrorCodes is a list of all the error codes. + ErrorCodes []int + + // ErrorMessages is a list of all the error codes. + ErrorMessages []string + + // Messages is a list of informational messages provided by the endpoint. + Messages []ResponseInfo + + // RayID is the internal identifier for the request that was made. + RayID string +} + +func (e Error) Error() string { + var errString string + errMessages := []string{} + for _, err := range e.Errors { + m := "" + if err.Message != "" { + m += err.Message + } + + if err.Code != 0 { + m += fmt.Sprintf(" (%d)", err.Code) + } + + errMessages = append(errMessages, m) + } + + msgs := []string{} + for _, m := range e.Messages { + msgs = append(msgs, m.Message) + } + + errString += strings.Join(errMessages, ", ") + + // `Messages` is primarily used for additional validation failure notes in + // page rules. This shouldn't be used going forward but instead, use the + // error fields appropriately. + if len(msgs) > 0 { + errString += "\n" + strings.Join(msgs, " \n") + } + + return errString +} + +// RequestError is for 4xx errors that we encounter not covered elsewhere +// (generally bad payloads). +type RequestError struct { + cloudflareError *Error +} + +func (e RequestError) Error() string { + return e.cloudflareError.Error() +} + +func (e RequestError) Errors() []ResponseInfo { + return e.cloudflareError.Errors +} + +func (e RequestError) ErrorCodes() []int { + return e.cloudflareError.ErrorCodes +} + +func (e RequestError) ErrorMessages() []string { + return e.cloudflareError.ErrorMessages +} + +func (e RequestError) InternalErrorCodeIs(code int) bool { + return e.cloudflareError.InternalErrorCodeIs(code) +} + +func (e RequestError) Messages() []ResponseInfo { + return e.cloudflareError.Messages +} + +func (e RequestError) RayID() string { + return e.cloudflareError.RayID +} + +func (e RequestError) Type() ErrorType { + return e.cloudflareError.Type +} + +func (e RequestError) Unwrap() error { + return e.cloudflareError +} + +func NewRequestError(e *Error) RequestError { + return RequestError{ + cloudflareError: e, + } +} + +// RatelimitError is for HTTP 429s where the service is telling the client to +// slow down. 
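+// A caller might detect this condition by unwrapping to the underlying *Error and
+// using its ClientRateLimited helper. A hedged sketch from the caller's side; it
+// assumes err came from a client call in this package and that one of the typed
+// errors in this file (which unwrap to *Error) is in its chain:
+//
+//	var apiErr *cloudflare.Error
+//	if errors.As(err, &apiErr) && apiErr.ClientRateLimited() {
+//		// back off before retrying the request
+//	}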
+type RatelimitError struct { + cloudflareError *Error +} + +func (e RatelimitError) Error() string { + return e.cloudflareError.Error() +} + +func (e RatelimitError) Errors() []ResponseInfo { + return e.cloudflareError.Errors +} + +func (e RatelimitError) ErrorCodes() []int { + return e.cloudflareError.ErrorCodes +} + +func (e RatelimitError) ErrorMessages() []string { + return e.cloudflareError.ErrorMessages +} + +func (e RatelimitError) InternalErrorCodeIs(code int) bool { + return e.cloudflareError.InternalErrorCodeIs(code) +} + +func (e RatelimitError) RayID() string { + return e.cloudflareError.RayID +} + +func (e RatelimitError) Type() ErrorType { + return e.cloudflareError.Type +} + +func (e RatelimitError) Unwrap() error { + return e.cloudflareError +} + +func NewRatelimitError(e *Error) RatelimitError { + return RatelimitError{ + cloudflareError: e, + } +} + +// ServiceError is a handler for 5xx errors returned to the client. +type ServiceError struct { + cloudflareError *Error +} + +func (e ServiceError) Error() string { + return e.cloudflareError.Error() +} + +func (e ServiceError) Errors() []ResponseInfo { + return e.cloudflareError.Errors +} + +func (e ServiceError) ErrorCodes() []int { + return e.cloudflareError.ErrorCodes +} + +func (e ServiceError) ErrorMessages() []string { + return e.cloudflareError.ErrorMessages +} + +func (e ServiceError) InternalErrorCodeIs(code int) bool { + return e.cloudflareError.InternalErrorCodeIs(code) +} + +func (e ServiceError) RayID() string { + return e.cloudflareError.RayID +} + +func (e ServiceError) Type() ErrorType { + return e.cloudflareError.Type +} + +func (e ServiceError) Unwrap() error { + return e.cloudflareError +} + +func NewServiceError(e *Error) ServiceError { + return ServiceError{ + cloudflareError: e, + } +} + +// AuthenticationError is for HTTP 401 responses. +type AuthenticationError struct { + cloudflareError *Error +} + +func (e AuthenticationError) Error() string { + return e.cloudflareError.Error() +} + +func (e AuthenticationError) Errors() []ResponseInfo { + return e.cloudflareError.Errors +} + +func (e AuthenticationError) ErrorCodes() []int { + return e.cloudflareError.ErrorCodes +} + +func (e AuthenticationError) ErrorMessages() []string { + return e.cloudflareError.ErrorMessages +} + +func (e AuthenticationError) InternalErrorCodeIs(code int) bool { + return e.cloudflareError.InternalErrorCodeIs(code) +} + +func (e AuthenticationError) RayID() string { + return e.cloudflareError.RayID +} + +func (e AuthenticationError) Type() ErrorType { + return e.cloudflareError.Type +} + +func (e AuthenticationError) Unwrap() error { + return e.cloudflareError +} + +func NewAuthenticationError(e *Error) AuthenticationError { + return AuthenticationError{ + cloudflareError: e, + } +} + +// AuthorizationError is for HTTP 403 responses. 
+type AuthorizationError struct { + cloudflareError *Error +} + +func (e AuthorizationError) Error() string { + return e.cloudflareError.Error() +} + +func (e AuthorizationError) Errors() []ResponseInfo { + return e.cloudflareError.Errors +} + +func (e AuthorizationError) ErrorCodes() []int { + return e.cloudflareError.ErrorCodes +} + +func (e AuthorizationError) ErrorMessages() []string { + return e.cloudflareError.ErrorMessages +} + +func (e AuthorizationError) InternalErrorCodeIs(code int) bool { + return e.cloudflareError.InternalErrorCodeIs(code) +} + +func (e AuthorizationError) RayID() string { + return e.cloudflareError.RayID +} + +func (e AuthorizationError) Type() ErrorType { + return e.cloudflareError.Type +} + +func (e AuthorizationError) Unwrap() error { + return e.cloudflareError +} + +func NewAuthorizationError(e *Error) AuthorizationError { + return AuthorizationError{ + cloudflareError: e, + } +} + +// NotFoundError is for HTTP 404 responses. +type NotFoundError struct { + cloudflareError *Error +} + +func (e NotFoundError) Error() string { + return e.cloudflareError.Error() +} + +func (e NotFoundError) Errors() []ResponseInfo { + return e.cloudflareError.Errors +} + +func (e NotFoundError) ErrorCodes() []int { + return e.cloudflareError.ErrorCodes +} + +func (e NotFoundError) ErrorMessages() []string { + return e.cloudflareError.ErrorMessages +} + +func (e NotFoundError) InternalErrorCodeIs(code int) bool { + return e.cloudflareError.InternalErrorCodeIs(code) +} + +func (e NotFoundError) RayID() string { + return e.cloudflareError.RayID +} + +func (e NotFoundError) Type() ErrorType { + return e.cloudflareError.Type +} + +func (e NotFoundError) Unwrap() error { + return e.cloudflareError +} + +func NewNotFoundError(e *Error) NotFoundError { + return NotFoundError{ + cloudflareError: e, + } +} + +// ClientError returns a boolean whether or not the raised error was caused by +// something client side. +func (e *Error) ClientError() bool { + return e.StatusCode >= http.StatusBadRequest && + e.StatusCode < http.StatusInternalServerError +} + +// ClientRateLimited returns a boolean whether or not the raised error was +// caused by too many requests from the client. +func (e *Error) ClientRateLimited() bool { + return e.Type == ErrorTypeRateLimit +} + +// InternalErrorCodeIs returns a boolean whether or not the desired internal +// error code is present in `e.InternalErrorCodes`. +func (e *Error) InternalErrorCodeIs(code int) bool { + for _, errCode := range e.ErrorCodes { + if errCode == code { + return true + } + } + + return false +} + +// ErrorMessageContains returns a boolean whether or not a substring exists in +// any of the `e.ErrorMessages` slice entries. +func (e *Error) ErrorMessageContains(s string) bool { + for _, errMsg := range e.ErrorMessages { + if strings.Contains(errMsg, s) { + return true + } + } + return false +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/fallback_domain.go b/vendor/github.com/cloudflare/cloudflare-go/fallback_domain.go new file mode 100644 index 0000000..e8bdfc9 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/fallback_domain.go @@ -0,0 +1,133 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// FallbackDomainResponse represents the response from the get fallback +// domain endpoints. +type FallbackDomainResponse struct { + Response + Result []FallbackDomain `json:"result"` +} + +// FallbackDomain represents the individual domain struct. 
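+// For illustration, a caller could build a list of these values and replace the
+// existing policy with UpdateFallbackDomain below. A hedged sketch; it assumes an
+// initialized *cloudflare.API client named api, a context ctx and an account ID
+// string accountID, none of which are defined in this file, and the values shown
+// are placeholders:
+//
+//	domains := []cloudflare.FallbackDomain{
+//		{Suffix: "corp.example.com", Description: "internal zone", DNSServer: []string{"10.0.0.53"}},
+//	}
+//	updated, err := api.UpdateFallbackDomain(ctx, accountID, domains)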
+type FallbackDomain struct { + Suffix string `json:"suffix,omitempty"` + Description string `json:"description,omitempty"` + DNSServer []string `json:"dns_server,omitempty"` +} + +// ListFallbackDomains returns all fallback domains within an account. +// +// API reference: https://api.cloudflare.com/#devices-get-local-domain-fallback-list +func (api *API) ListFallbackDomains(ctx context.Context, accountID string) ([]FallbackDomain, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/fallback_domains", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []FallbackDomain{}, err + } + + var fallbackDomainResponse FallbackDomainResponse + err = json.Unmarshal(res, &fallbackDomainResponse) + if err != nil { + return []FallbackDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return fallbackDomainResponse.Result, nil +} + +// ListFallbackDomainsDeviceSettingsPolicy returns all fallback domains within an account for a specific device settings policy. +// +// API reference: https://api.cloudflare.com/#devices-get-local-domain-fallback-list +func (api *API) ListFallbackDomainsDeviceSettingsPolicy(ctx context.Context, accountID, policyID string) ([]FallbackDomain, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s/fallback_domains", AccountRouteRoot, accountID, policyID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []FallbackDomain{}, err + } + + var fallbackDomainResponse FallbackDomainResponse + err = json.Unmarshal(res, &fallbackDomainResponse) + if err != nil { + return []FallbackDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return fallbackDomainResponse.Result, nil +} + +// UpdateFallbackDomain updates the existing fallback domain policy. +// +// API reference: https://api.cloudflare.com/#devices-set-local-domain-fallback-list +func (api *API) UpdateFallbackDomain(ctx context.Context, accountID string, domains []FallbackDomain) ([]FallbackDomain, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/fallback_domains", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, domains) + if err != nil { + return []FallbackDomain{}, err + } + + var fallbackDomainResponse FallbackDomainResponse + err = json.Unmarshal(res, &fallbackDomainResponse) + if err != nil { + return []FallbackDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return fallbackDomainResponse.Result, nil +} + +// UpdateFallbackDomainDeviceSettingsPolicy updates the existing fallback domain policy for a specific device settings policy. +// +// API reference: https://api.cloudflare.com/#devices-set-local-domain-fallback-list +func (api *API) UpdateFallbackDomainDeviceSettingsPolicy(ctx context.Context, accountID, policyID string, domains []FallbackDomain) ([]FallbackDomain, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s/fallback_domains", AccountRouteRoot, accountID, policyID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, domains) + if err != nil { + return []FallbackDomain{}, err + } + + var fallbackDomainResponse FallbackDomainResponse + err = json.Unmarshal(res, &fallbackDomainResponse) + if err != nil { + return []FallbackDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return fallbackDomainResponse.Result, nil +} + +// RestoreFallbackDomainDefaultsDeviceSettingsPolicy resets the domain fallback values to the default +// list for a specific device settings policy. 
+// +// API reference: TBA. +func (api *API) RestoreFallbackDomainDefaults(ctx context.Context, accountID string) error { + uri := fmt.Sprintf("/%s/%s/devices/policy/fallback_domains?reset_defaults=true", AccountRouteRoot, accountID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, []string{}) + if err != nil { + return err + } + + return nil +} + +// RestoreFallbackDomainDefaults resets the domain fallback values to the default +// list. +// +// API reference: TBA. +func (api *API) RestoreFallbackDomainDefaultsDeviceSettingsPolicy(ctx context.Context, accountID, policyID string) error { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s/fallback_domains?reset_defaults=true", AccountRouteRoot, accountID, policyID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, []string{}) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/filter.go b/vendor/github.com/cloudflare/cloudflare-go/filter.go new file mode 100644 index 0000000..107cab5 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/filter.go @@ -0,0 +1,290 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/goccy/go-json" +) + +var ErrNotEnoughFilterIDsProvided = errors.New("at least one filter ID must be provided.") + +// Filter holds the structure of the filter type. +type Filter struct { + ID string `json:"id,omitempty"` + Expression string `json:"expression"` + Paused bool `json:"paused"` + Description string `json:"description"` + + // Property is mentioned in documentation however isn't populated in + // any of the API requests. For now, let's just omit it unless it's + // provided. + Ref string `json:"ref,omitempty"` +} + +// FiltersDetailResponse is the API response that is returned +// for requesting all filters on a zone. +type FiltersDetailResponse struct { + Result []Filter `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// FilterDetailResponse is the API response that is returned +// for requesting a single filter on a zone. +type FilterDetailResponse struct { + Result Filter `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// FilterValidateExpression represents the JSON payload for checking +// an expression. +type FilterValidateExpression struct { + Expression string `json:"expression"` +} + +// FilterValidateExpressionResponse represents the API response for +// checking the expression. It conforms to the JSON API approach however +// we don't need all of the fields exposed. +type FilterValidateExpressionResponse struct { + Success bool `json:"success"` + Errors []FilterValidationExpressionMessage `json:"errors"` +} + +// FilterValidationExpressionMessage represents the API error message. +type FilterValidationExpressionMessage struct { + Message string `json:"message"` +} + +// FilterCreateParams contains required and optional params +// for creating a filter. +type FilterCreateParams struct { + ID string `json:"id,omitempty"` + Expression string `json:"expression"` + Paused bool `json:"paused"` + Description string `json:"description"` + Ref string `json:"ref,omitempty"` +} + +// FilterUpdateParams contains required and optional params +// for updating a filter. 
+type FilterUpdateParams struct { + ID string `json:"id"` + Expression string `json:"expression"` + Paused bool `json:"paused"` + Description string `json:"description"` + Ref string `json:"ref,omitempty"` +} + +type FilterListParams struct { + ResultInfo +} + +// Filter returns a single filter in a zone based on the filter ID. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/get/#get-by-filter-id +func (api *API) Filter(ctx context.Context, rc *ResourceContainer, filterID string) (Filter, error) { + uri := fmt.Sprintf("/zones/%s/filters/%s", rc.Identifier, filterID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Filter{}, err + } + + var filterResponse FilterDetailResponse + err = json.Unmarshal(res, &filterResponse) + if err != nil { + return Filter{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return filterResponse.Result, nil +} + +// Filters returns filters for a zone. +// +// Automatically paginates all results unless `params.PerPage` and `params.Page` +// is set. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/get/#get-all-filters +func (api *API) Filters(ctx context.Context, rc *ResourceContainer, params FilterListParams) ([]Filter, *ResultInfo, error) { + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = 50 + } + if params.Page < 1 { + params.Page = 1 + } + + var filters []Filter + var fResponse FiltersDetailResponse + for { + fResponse = FiltersDetailResponse{} + uri := buildURI(fmt.Sprintf("/zones/%s/filters", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Filter{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &fResponse) + if err != nil { + return []Filter{}, &ResultInfo{}, fmt.Errorf("failed to unmarshal filters JSON data: %w", err) + } + + filters = append(filters, fResponse.Result...) + params.ResultInfo = fResponse.ResultInfo.Next() + + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return filters, &fResponse.ResultInfo, nil +} + +// CreateFilters creates new filters. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/post/ +func (api *API) CreateFilters(ctx context.Context, rc *ResourceContainer, params []FilterCreateParams) ([]Filter, error) { + uri := fmt.Sprintf("/zones/%s/filters", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return []Filter{}, err + } + + var filtersResponse FiltersDetailResponse + err = json.Unmarshal(res, &filtersResponse) + if err != nil { + return []Filter{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return filtersResponse.Result, nil +} + +// UpdateFilter updates a single filter. 
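+// For example, a caller might update a single filter's expression as follows. A
+// hedged sketch; it assumes an initialized client api, a context ctx, a zone ID
+// zoneID, an existing filterID, and the ZoneIdentifier helper defined elsewhere
+// in this library:
+//
+//	filter, err := api.UpdateFilter(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.FilterUpdateParams{
+//		ID:         filterID,
+//		Expression: `ip.src eq 198.51.100.4`,
+//	})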
+// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/put/#update-a-single-filter +func (api *API) UpdateFilter(ctx context.Context, rc *ResourceContainer, params FilterUpdateParams) (Filter, error) { + if params.ID == "" { + return Filter{}, fmt.Errorf("filter ID cannot be empty") + } + + uri := fmt.Sprintf("/zones/%s/filters/%s", rc.Identifier, params.ID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return Filter{}, err + } + + var filterResponse FilterDetailResponse + err = json.Unmarshal(res, &filterResponse) + if err != nil { + return Filter{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return filterResponse.Result, nil +} + +// UpdateFilters updates many filters at once. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/put/#update-multiple-filters +func (api *API) UpdateFilters(ctx context.Context, rc *ResourceContainer, params []FilterUpdateParams) ([]Filter, error) { + for _, filter := range params { + if filter.ID == "" { + return []Filter{}, fmt.Errorf("filter ID cannot be empty") + } + } + + uri := fmt.Sprintf("/zones/%s/filters", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return []Filter{}, err + } + + var filtersResponse FiltersDetailResponse + err = json.Unmarshal(res, &filtersResponse) + if err != nil { + return []Filter{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return filtersResponse.Result, nil +} + +// DeleteFilter deletes a single filter. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/delete/#delete-a-single-filter +func (api *API) DeleteFilter(ctx context.Context, rc *ResourceContainer, filterID string) error { + if filterID == "" { + return fmt.Errorf("filter ID cannot be empty") + } + + uri := fmt.Sprintf("/zones/%s/filters/%s", rc.Identifier, filterID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// DeleteFilters deletes multiple filters. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/delete/#delete-multiple-filters +func (api *API) DeleteFilters(ctx context.Context, rc *ResourceContainer, filterIDs []string) error { + if len(filterIDs) == 0 { + return ErrNotEnoughFilterIDsProvided + } + + // Swap this to a typed struct and passing in all parameters together. + q := url.Values{} + for _, id := range filterIDs { + q.Add("id", id) + } + + uri := (&url.URL{ + Path: fmt.Sprintf("/zones/%s/filters", rc.Identifier), + RawQuery: q.Encode(), + }).String() + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// ValidateFilterExpression checks correctness of a filter expression. 
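+// For example, an expression could be checked before it is used in a filter. A
+// hedged sketch; it assumes an initialized client api and a context ctx, and the
+// expression is illustrative:
+//
+//	if err := api.ValidateFilterExpression(ctx, `http.request.uri.path contains "/admin"`); err != nil {
+//		// the expression was rejected; err carries the validation message
+//	}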
+// +// API reference: https://developers.cloudflare.com/firewall/api/cf-filters/validation/ +func (api *API) ValidateFilterExpression(ctx context.Context, expression string) error { + expressionPayload := FilterValidateExpression{Expression: expression} + + _, err := api.makeRequestContext(ctx, http.MethodPost, "/filters/validate-expr", expressionPayload) + if err != nil { + var filterValidationResponse FilterValidateExpressionResponse + + jsonErr := json.Unmarshal([]byte(err.Error()), &filterValidationResponse) + if jsonErr != nil { + return fmt.Errorf(errUnmarshalError+": %w", jsonErr) + } + + if !filterValidationResponse.Success { + // Unsure why but the API returns `errors` as an array but it only + // ever shows the issue with one problem at a time ¯\_(ツ)_/¯ + return fmt.Errorf(filterValidationResponse.Errors[0].Message) + } + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/firewall.go b/vendor/github.com/cloudflare/cloudflare-go/firewall.go new file mode 100644 index 0000000..893165c --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/firewall.go @@ -0,0 +1,281 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/goccy/go-json" +) + +// AccessRule represents a firewall access rule. +type AccessRule struct { + ID string `json:"id,omitempty"` + Notes string `json:"notes,omitempty"` + AllowedModes []string `json:"allowed_modes,omitempty"` + Mode string `json:"mode,omitempty"` + Configuration AccessRuleConfiguration `json:"configuration,omitempty"` + Scope AccessRuleScope `json:"scope,omitempty"` + CreatedOn time.Time `json:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` +} + +// AccessRuleConfiguration represents the configuration of a firewall +// access rule. +type AccessRuleConfiguration struct { + Target string `json:"target,omitempty"` + Value string `json:"value,omitempty"` +} + +// AccessRuleScope represents the scope of a firewall access rule. +type AccessRuleScope struct { + ID string `json:"id,omitempty"` + Email string `json:"email,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` +} + +// AccessRuleResponse represents the response from the firewall access +// rule endpoint. +type AccessRuleResponse struct { + Result AccessRule `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// AccessRuleListResponse represents the response from the list access rules +// endpoint. +type AccessRuleListResponse struct { + Result []AccessRule `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// ListUserAccessRules returns a slice of access rules for the logged-in user. +// +// This takes an AccessRule to allow filtering of the results returned. +// +// API reference: https://api.cloudflare.com/#user-level-firewall-access-rule-list-access-rules +func (api *API) ListUserAccessRules(ctx context.Context, accessRule AccessRule, page int) (*AccessRuleListResponse, error) { + return api.listAccessRules(ctx, "/user", accessRule, page) +} + +// CreateUserAccessRule creates a firewall access rule for the logged-in user. +// +// API reference: https://api.cloudflare.com/#user-level-firewall-access-rule-create-access-rule +func (api *API) CreateUserAccessRule(ctx context.Context, accessRule AccessRule) (*AccessRuleResponse, error) { + return api.createAccessRule(ctx, "/user", accessRule) +} + +// UserAccessRule returns the details of a user's account access rule. 
+// +// API reference: https://api.cloudflare.com/#user-level-firewall-access-rule-list-access-rules +func (api *API) UserAccessRule(ctx context.Context, accessRuleID string) (*AccessRuleResponse, error) { + return api.retrieveAccessRule(ctx, "/user", accessRuleID) +} + +// UpdateUserAccessRule updates a single access rule for the logged-in user & +// given access rule identifier. +// +// API reference: https://api.cloudflare.com/#user-level-firewall-access-rule-update-access-rule +func (api *API) UpdateUserAccessRule(ctx context.Context, accessRuleID string, accessRule AccessRule) (*AccessRuleResponse, error) { + return api.updateAccessRule(ctx, "/user", accessRuleID, accessRule) +} + +// DeleteUserAccessRule deletes a single access rule for the logged-in user and +// access rule identifiers. +// +// API reference: https://api.cloudflare.com/#user-level-firewall-access-rule-update-access-rule +func (api *API) DeleteUserAccessRule(ctx context.Context, accessRuleID string) (*AccessRuleResponse, error) { + return api.deleteAccessRule(ctx, "/user", accessRuleID) +} + +// ListZoneAccessRules returns a slice of access rules for the given zone +// identifier. +// +// This takes an AccessRule to allow filtering of the results returned. +// +// API reference: https://api.cloudflare.com/#firewall-access-rule-for-a-zone-list-access-rules +func (api *API) ListZoneAccessRules(ctx context.Context, zoneID string, accessRule AccessRule, page int) (*AccessRuleListResponse, error) { + return api.listAccessRules(ctx, fmt.Sprintf("/zones/%s", zoneID), accessRule, page) +} + +// CreateZoneAccessRule creates a firewall access rule for the given zone +// identifier. +// +// API reference: https://api.cloudflare.com/#firewall-access-rule-for-a-zone-create-access-rule +func (api *API) CreateZoneAccessRule(ctx context.Context, zoneID string, accessRule AccessRule) (*AccessRuleResponse, error) { + return api.createAccessRule(ctx, fmt.Sprintf("/zones/%s", zoneID), accessRule) +} + +// ZoneAccessRule returns the details of a zone's access rule. +// +// API reference: https://api.cloudflare.com/#firewall-access-rule-for-a-zone-list-access-rules +func (api *API) ZoneAccessRule(ctx context.Context, zoneID string, accessRuleID string) (*AccessRuleResponse, error) { + return api.retrieveAccessRule(ctx, fmt.Sprintf("/zones/%s", zoneID), accessRuleID) +} + +// UpdateZoneAccessRule updates a single access rule for the given zone & +// access rule identifiers. +// +// API reference: https://api.cloudflare.com/#firewall-access-rule-for-a-zone-update-access-rule +func (api *API) UpdateZoneAccessRule(ctx context.Context, zoneID, accessRuleID string, accessRule AccessRule) (*AccessRuleResponse, error) { + return api.updateAccessRule(ctx, fmt.Sprintf("/zones/%s", zoneID), accessRuleID, accessRule) +} + +// DeleteZoneAccessRule deletes a single access rule for the given zone and +// access rule identifiers. +// +// API reference: https://api.cloudflare.com/#firewall-access-rule-for-a-zone-delete-access-rule +func (api *API) DeleteZoneAccessRule(ctx context.Context, zoneID, accessRuleID string) (*AccessRuleResponse, error) { + return api.deleteAccessRule(ctx, fmt.Sprintf("/zones/%s", zoneID), accessRuleID) +} + +// ListAccountAccessRules returns a slice of access rules for the given +// account identifier. +// +// This takes an AccessRule to allow filtering of the results returned. 
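+// For example, the results could be narrowed to rules in "block" mode. A hedged
+// sketch; it assumes an initialized client api, a context ctx and an account ID
+// string accountID, and the mode value is illustrative:
+//
+//	resp, err := api.ListAccountAccessRules(ctx, accountID, cloudflare.AccessRule{Mode: "block"}, 1)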
+// +// API reference: https://api.cloudflare.com/#account-level-firewall-access-rule-list-access-rules +func (api *API) ListAccountAccessRules(ctx context.Context, accountID string, accessRule AccessRule, page int) (*AccessRuleListResponse, error) { + return api.listAccessRules(ctx, fmt.Sprintf("/accounts/%s", accountID), accessRule, page) +} + +// CreateAccountAccessRule creates a firewall access rule for the given +// account identifier. +// +// API reference: https://api.cloudflare.com/#account-level-firewall-access-rule-create-access-rule +func (api *API) CreateAccountAccessRule(ctx context.Context, accountID string, accessRule AccessRule) (*AccessRuleResponse, error) { + return api.createAccessRule(ctx, fmt.Sprintf("/accounts/%s", accountID), accessRule) +} + +// AccountAccessRule returns the details of an account's access rule. +// +// API reference: https://api.cloudflare.com/#account-level-firewall-access-rule-access-rule-details +func (api *API) AccountAccessRule(ctx context.Context, accountID string, accessRuleID string) (*AccessRuleResponse, error) { + return api.retrieveAccessRule(ctx, fmt.Sprintf("/accounts/%s", accountID), accessRuleID) +} + +// UpdateAccountAccessRule updates a single access rule for the given +// account & access rule identifiers. +// +// API reference: https://api.cloudflare.com/#account-level-firewall-access-rule-update-access-rule +func (api *API) UpdateAccountAccessRule(ctx context.Context, accountID, accessRuleID string, accessRule AccessRule) (*AccessRuleResponse, error) { + return api.updateAccessRule(ctx, fmt.Sprintf("/accounts/%s", accountID), accessRuleID, accessRule) +} + +// DeleteAccountAccessRule deletes a single access rule for the given +// account and access rule identifiers. +// +// API reference: https://api.cloudflare.com/#account-level-firewall-access-rule-delete-access-rule +func (api *API) DeleteAccountAccessRule(ctx context.Context, accountID, accessRuleID string) (*AccessRuleResponse, error) { + return api.deleteAccessRule(ctx, fmt.Sprintf("/accounts/%s", accountID), accessRuleID) +} + +func (api *API) listAccessRules(ctx context.Context, prefix string, accessRule AccessRule, page int) (*AccessRuleListResponse, error) { + // Construct a query string + v := url.Values{} + if page <= 0 { + page = 1 + } + v.Set("page", strconv.Itoa(page)) + // Request as many rules as possible per page - API max is 100 + v.Set("per_page", "100") + if accessRule.Notes != "" { + v.Set("notes", accessRule.Notes) + } + if accessRule.Mode != "" { + v.Set("mode", accessRule.Mode) + } + if accessRule.Scope.Type != "" { + v.Set("scope_type", accessRule.Scope.Type) + } + if accessRule.Configuration.Value != "" { + v.Set("configuration_value", accessRule.Configuration.Value) + } + if accessRule.Configuration.Target != "" { + v.Set("configuration_target", accessRule.Configuration.Target) + } + v.Set("page", strconv.Itoa(page)) + + uri := fmt.Sprintf("%s/firewall/access_rules/rules?%s", prefix, v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + response := &AccessRuleListResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response, nil +} + +func (api *API) createAccessRule(ctx context.Context, prefix string, accessRule AccessRule) (*AccessRuleResponse, error) { + uri := fmt.Sprintf("%s/firewall/access_rules/rules", prefix) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, accessRule) + if 
err != nil { + return nil, err + } + + response := &AccessRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +func (api *API) retrieveAccessRule(ctx context.Context, prefix, accessRuleID string) (*AccessRuleResponse, error) { + uri := fmt.Sprintf("%s/firewall/access_rules/rules/%s", prefix, accessRuleID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + if err != nil { + return nil, err + } + + response := &AccessRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +func (api *API) updateAccessRule(ctx context.Context, prefix, accessRuleID string, accessRule AccessRule) (*AccessRuleResponse, error) { + uri := fmt.Sprintf("%s/firewall/access_rules/rules/%s", prefix, accessRuleID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, accessRule) + if err != nil { + return nil, err + } + + response := &AccessRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response, nil +} + +func (api *API) deleteAccessRule(ctx context.Context, prefix, accessRuleID string) (*AccessRuleResponse, error) { + uri := fmt.Sprintf("%s/firewall/access_rules/rules/%s", prefix, accessRuleID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + + response := &AccessRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/firewall_rules.go b/vendor/github.com/cloudflare/cloudflare-go/firewall_rules.go new file mode 100644 index 0000000..edcd2a0 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/firewall_rules.go @@ -0,0 +1,244 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/goccy/go-json" +) + +// FirewallRule is the struct of the firewall rule. +type FirewallRule struct { + ID string `json:"id,omitempty"` + Paused bool `json:"paused"` + Description string `json:"description"` + Action string `json:"action"` + Priority interface{} `json:"priority"` + Filter Filter `json:"filter"` + Products []string `json:"products,omitempty"` + Ref string `json:"ref,omitempty"` + CreatedOn time.Time `json:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` +} + +// FirewallRulesDetailResponse is the API response for the firewall +// rules. +type FirewallRulesDetailResponse struct { + Result []FirewallRule `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// FirewallRuleResponse is the API response that is returned +// for requesting a single firewall rule on a zone. +type FirewallRuleResponse struct { + Result FirewallRule `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// FirewallRuleCreateParams contains required and optional params +// for creating a firewall rule. 
+type FirewallRuleCreateParams struct { + ID string `json:"id,omitempty"` + Paused bool `json:"paused"` + Description string `json:"description"` + Action string `json:"action"` + Priority interface{} `json:"priority"` + Filter Filter `json:"filter"` + Products []string `json:"products,omitempty"` + Ref string `json:"ref,omitempty"` +} + +// FirewallRuleUpdateParams contains required and optional params +// for updating a firewall rule. +type FirewallRuleUpdateParams struct { + ID string `json:"id"` + Paused bool `json:"paused"` + Description string `json:"description"` + Action string `json:"action"` + Priority interface{} `json:"priority"` + Filter Filter `json:"filter"` + Products []string `json:"products,omitempty"` + Ref string `json:"ref,omitempty"` +} + +type FirewallRuleListParams struct { + ResultInfo +} + +// FirewallRules returns all firewall rules. +// +// Automatically paginates all results unless `params.PerPage` and `params.Page` +// is set. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/get/#get-all-rules +func (api *API) FirewallRules(ctx context.Context, rc *ResourceContainer, params FirewallRuleListParams) ([]FirewallRule, *ResultInfo, error) { + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = 50 + } + if params.Page < 1 { + params.Page = 1 + } + + var firewallRules []FirewallRule + var fResponse FirewallRulesDetailResponse + for { + fResponse = FirewallRulesDetailResponse{} + uri := buildURI(fmt.Sprintf("/zones/%s/firewall/rules", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []FirewallRule{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &fResponse) + if err != nil { + return []FirewallRule{}, &ResultInfo{}, fmt.Errorf("failed to unmarshal filters JSON data: %w", err) + } + + firewallRules = append(firewallRules, fResponse.Result...) + params.ResultInfo = fResponse.ResultInfo.Next() + + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return firewallRules, &fResponse.ResultInfo, nil +} + +// FirewallRule returns a single firewall rule based on the ID. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/get/#get-by-rule-id +func (api *API) FirewallRule(ctx context.Context, rc *ResourceContainer, firewallRuleID string) (FirewallRule, error) { + uri := fmt.Sprintf("/zones/%s/firewall/rules/%s", rc.Identifier, firewallRuleID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return FirewallRule{}, err + } + + var firewallRuleResponse FirewallRuleResponse + err = json.Unmarshal(res, &firewallRuleResponse) + if err != nil { + return FirewallRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return firewallRuleResponse.Result, nil +} + +// CreateFirewallRules creates new firewall rules. 
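+// For example, a single blocking rule could be created from an expression. A hedged
+// sketch; it assumes an initialized client api, a context ctx, a zone ID zoneID and
+// the ZoneIdentifier helper defined elsewhere in this library; the expression and
+// action shown are illustrative:
+//
+//	rules, err := api.CreateFirewallRules(ctx, cloudflare.ZoneIdentifier(zoneID), []cloudflare.FirewallRuleCreateParams{
+//		{
+//			Description: "block requests to /admin",
+//			Action:      "block",
+//			Filter:      cloudflare.Filter{Expression: `http.request.uri.path contains "/admin"`},
+//		},
+//	})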
+// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/post/ +func (api *API) CreateFirewallRules(ctx context.Context, rc *ResourceContainer, params []FirewallRuleCreateParams) ([]FirewallRule, error) { + uri := fmt.Sprintf("/zones/%s/firewall/rules", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return []FirewallRule{}, err + } + + var firewallRulesDetailResponse FirewallRulesDetailResponse + err = json.Unmarshal(res, &firewallRulesDetailResponse) + if err != nil { + return []FirewallRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return firewallRulesDetailResponse.Result, nil +} + +// UpdateFirewallRule updates a single firewall rule. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/put/#update-a-single-rule +func (api *API) UpdateFirewallRule(ctx context.Context, rc *ResourceContainer, params FirewallRuleUpdateParams) (FirewallRule, error) { + if params.ID == "" { + return FirewallRule{}, fmt.Errorf("firewall rule ID cannot be empty") + } + + uri := fmt.Sprintf("/zones/%s/firewall/rules/%s", rc.Identifier, params.ID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return FirewallRule{}, err + } + + var firewallRuleResponse FirewallRuleResponse + err = json.Unmarshal(res, &firewallRuleResponse) + if err != nil { + return FirewallRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return firewallRuleResponse.Result, nil +} + +// UpdateFirewallRules updates a single firewall rule. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/put/#update-multiple-rules +func (api *API) UpdateFirewallRules(ctx context.Context, rc *ResourceContainer, params []FirewallRuleUpdateParams) ([]FirewallRule, error) { + for _, firewallRule := range params { + if firewallRule.ID == "" { + return []FirewallRule{}, fmt.Errorf("firewall ID cannot be empty") + } + } + + uri := fmt.Sprintf("/zones/%s/firewall/rules", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return []FirewallRule{}, err + } + + var firewallRulesDetailResponse FirewallRulesDetailResponse + err = json.Unmarshal(res, &firewallRulesDetailResponse) + if err != nil { + return []FirewallRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return firewallRulesDetailResponse.Result, nil +} + +// DeleteFirewallRule deletes a single firewall rule. +// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/delete/#delete-a-single-rule +func (api *API) DeleteFirewallRule(ctx context.Context, rc *ResourceContainer, firewallRuleID string) error { + if firewallRuleID == "" { + return fmt.Errorf("firewall rule ID cannot be empty") + } + + uri := fmt.Sprintf("/zones/%s/firewall/rules/%s", rc.Identifier, firewallRuleID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} + +// DeleteFirewallRules deletes multiple firewall rules at once. 
+// +// API reference: https://developers.cloudflare.com/firewall/api/cf-firewall-rules/delete/#delete-multiple-rules +func (api *API) DeleteFirewallRules(ctx context.Context, rc *ResourceContainer, firewallRuleIDs []string) error { + v := url.Values{} + + for _, ruleID := range firewallRuleIDs { + v.Add("id", ruleID) + } + + uri := fmt.Sprintf("/zones/%s/firewall/rules?%s", rc.Identifier, v.Encode()) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/flake.lock b/vendor/github.com/cloudflare/cloudflare-go/flake.lock new file mode 100644 index 0000000..153b6ef --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/flake.lock @@ -0,0 +1,540 @@ +{ + "nodes": { + "builtfilter": { + "inputs": { + "flox-floxpkgs": [ + "flox-floxpkgs" + ] + }, + "locked": { + "lastModified": 1679586988, + "narHash": "sha256-TkoF4E4yN40s042YG6DaOzk+vtbzsoey0lUO+tm03is=", + "owner": "flox", + "repo": "builtfilter", + "rev": "931a381bb96d909f51fcf25d7ca4fa9dcfb12aff", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "builtfilter-rs", + "repo": "builtfilter", + "type": "github" + } + }, + "capacitor": { + "inputs": { + "nixpkgs": "nixpkgs", + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1675110136, + "narHash": "sha256-83n/ZLBMoIkgYGy12F1hNaqMUgJsfkno5P1+sm9liOU=", + "owner": "flox", + "repo": "capacitor", + "rev": "9d4b9bce0f439e01fe2c2b2a1bfe08592a6204c4", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "v0", + "repo": "capacitor", + "type": "github" + } + }, + "capacitor_2": { + "inputs": { + "nixpkgs": [ + "flox-floxpkgs", + "nixpkgs", + "nixpkgs" + ], + "nixpkgs-lib": "nixpkgs-lib_2" + }, + "locked": { + "lastModified": 1675110136, + "narHash": "sha256-83n/ZLBMoIkgYGy12F1hNaqMUgJsfkno5P1+sm9liOU=", + "owner": "flox", + "repo": "capacitor", + "rev": "9d4b9bce0f439e01fe2c2b2a1bfe08592a6204c4", + "type": "github" + }, + "original": { + "owner": "flox", + "repo": "capacitor", + "type": "github" + } + }, + "catalog": { + "flake": false, + "locked": { + "lastModified": 1665076737, + "narHash": "sha256-S0bD7Z434Lvm7U4VHwvmxdTMrexdr72Yk6z0ExE3j7s=", + "owner": "flox", + "repo": "floxpkgs", + "rev": "bd8326c2fea27d01933eacb922f5ae70f97140c6", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "publish", + "repo": "floxpkgs", + "type": "github" + } + }, + "commitizen-src": { + "flake": false, + "locked": { + "lastModified": 1679149521, + "narHash": "sha256-F/fbBEwG7ijHELy4RnpvlXOPkTsXFxjALdK7UIGIzMo=", + "owner": "commitizen-tools", + "repo": "commitizen", + "rev": "378a42881891633d8a81939cb46426eb36ed01aa", + "type": "github" + }, + "original": { + "owner": "commitizen-tools", + "repo": "commitizen", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-utils": { + "locked": { + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + 
"type": "github" + } + }, + "flox": { + "inputs": { + "commitizen-src": "commitizen-src", + "flox-bash": [ + "flox-floxpkgs", + "flox-bash" + ], + "flox-floxpkgs": [ + "flox-floxpkgs" + ], + "shellHooks": "shellHooks" + }, + "locked": { + "lastModified": 1679679655, + "narHash": "sha256-zug5lg3CqDMkmtbort6CHFJgUPyzRip0tg7t3dbSTWo=", + "ref": "main", + "rev": "98a44ab06d9f9f5e4824fd3add2d32aef95e606f", + "revCount": 427, + "type": "git", + "url": "ssh://git@github.com/flox/flox" + }, + "original": { + "ref": "main", + "type": "git", + "url": "ssh://git@github.com/flox/flox" + } + }, + "flox-bash": { + "inputs": { + "flox-floxpkgs": [ + "flox-floxpkgs" + ] + }, + "locked": { + "lastModified": 1679665116, + "narHash": "sha256-v1qgb6rOVa9q8oIlps/Z4kw7+YQqN3AsScNEgy5xwGQ=", + "ref": "main", + "rev": "db7efcacb5f5935b286006288f23f931a573a918", + "revCount": 216, + "type": "git", + "url": "ssh://git@github.com/flox/flox-bash" + }, + "original": { + "ref": "main", + "type": "git", + "url": "ssh://git@github.com/flox/flox-bash" + } + }, + "flox-floxpkgs": { + "inputs": { + "builtfilter": "builtfilter", + "capacitor": "capacitor", + "catalog": "catalog", + "flox": "flox", + "flox-bash": "flox-bash", + "nixpkgs": "nixpkgs_3", + "tracelinks": "tracelinks" + }, + "locked": { + "lastModified": 1680131458, + "narHash": "sha256-B0xvpF2h5iotKAMP13l0VtsXRAPwKQLitmxKdBSWLQQ=", + "owner": "flox", + "repo": "floxpkgs", + "rev": "6d8216d24ce8959b599107ccff8b15c959540b1a", + "type": "github" + }, + "original": { + "owner": "flox", + "repo": "floxpkgs", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "flox-floxpkgs", + "flox", + "shellHooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1660459072, + "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1676973346, + "narHash": "sha256-rft8oGMocTAhUVqG3LW6I8K/Fo9ICGmNjRqaWTJwav0=", + "owner": "flox", + "repo": "nixpkgs", + "rev": "d0d55259081f0b97c828f38559cad899d351cad1", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "stable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1679791877, + "narHash": "sha256-tTV1Mf0hPWIMtqyU16Kd2JUBDWvfHlDC9pF57vcbgpQ=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": "cc060ddbf652a532b54057081d5abd6144d01971", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nixpkgs.lib", + "type": "github" + } + }, + "nixpkgs-lib_2": { + "locked": { + "lastModified": 1679791877, + "narHash": "sha256-tTV1Mf0hPWIMtqyU16Kd2JUBDWvfHlDC9pF57vcbgpQ=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": "cc060ddbf652a532b54057081d5abd6144d01971", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nixpkgs.lib", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1678872516, + "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-22.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-stable_2": { + "locked": { + "lastModified": 1676973346, + "narHash": 
"sha256-rft8oGMocTAhUVqG3LW6I8K/Fo9ICGmNjRqaWTJwav0=", + "owner": "flox", + "repo": "nixpkgs", + "rev": "d0d55259081f0b97c828f38559cad899d351cad1", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "stable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-staging": { + "locked": { + "lastModified": 1679262748, + "narHash": "sha256-DQCrrAFrkxijC6haUzOC5ZoFqpcv/tg2WxnyW3np1Cc=", + "owner": "flox", + "repo": "nixpkgs", + "rev": "60c1d71f2ba4c80178ec84523c2ca0801522e0a6", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "staging", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-unstable": { + "locked": { + "lastModified": 1679944645, + "narHash": "sha256-e5Qyoe11UZjVfgRfwNoSU57ZeKuEmjYb77B9IVW7L/M=", + "owner": "flox", + "repo": "nixpkgs", + "rev": "4bb072f0a8b267613c127684e099a70e1f6ff106", + "type": "github" + }, + "original": { + "owner": "flox", + "ref": "unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1678898370, + "narHash": "sha256-xTICr1j+uat5hk9FyuPOFGxpWHdJRibwZC+ATi0RbtE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "ac718d02867a84b42522a0ece52d841188208f2c", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "inputs": { + "capacitor": "capacitor_2", + "flox": [ + "flox-floxpkgs", + "flox" + ], + "flox-bash": [ + "flox-floxpkgs", + "flox-bash" + ], + "flox-floxpkgs": [ + "flox-floxpkgs" + ], + "nixpkgs": [ + "flox-floxpkgs", + "nixpkgs", + "nixpkgs-stable" + ], + "nixpkgs-stable": "nixpkgs-stable_2", + "nixpkgs-staging": "nixpkgs-staging", + "nixpkgs-unstable": "nixpkgs-unstable", + "nixpkgs__flox__aarch64-darwin": "nixpkgs__flox__aarch64-darwin", + "nixpkgs__flox__aarch64-linux": "nixpkgs__flox__aarch64-linux", + "nixpkgs__flox__i686-linux": "nixpkgs__flox__i686-linux", + "nixpkgs__flox__x86_64-darwin": "nixpkgs__flox__x86_64-darwin", + "nixpkgs__flox__x86_64-linux": "nixpkgs__flox__x86_64-linux" + }, + "locked": { + "lastModified": 1680113891, + "narHash": "sha256-JiAmKV8ECf877cDbTum06NQ28n8FvwC4DYlCYzEVWxg=", + "owner": "flox", + "repo": "nixpkgs-flox", + "rev": "b7e7e40e2aa1ca2a44db440f5dc52213564af02f", + "type": "github" + }, + "original": { + "owner": "flox", + "repo": "nixpkgs-flox", + "type": "github" + } + }, + "nixpkgs__flox__aarch64-darwin": { + "flake": false, + "locked": { + "host": "catalog.floxsdlc.com", + "lastModified": 1680113678, + "narHash": "sha256-rCkwbly3gyvNcw21VBMXcf6EmF7crKZAS8Ylh2B5H+I=", + "owner": "flox", + "repo": "nixpkgs-flox", + "rev": "31d3b5fafdf504842e1e47a0c9b5888cf5dbae06", + "type": "github" + }, + "original": { + "host": "catalog.floxsdlc.com", + "owner": "flox", + "ref": "aarch64-darwin", + "repo": "nixpkgs-flox", + "type": "github" + } + }, + "nixpkgs__flox__aarch64-linux": { + "flake": false, + "locked": { + "host": "catalog.floxsdlc.com", + "lastModified": 1680113660, + "narHash": "sha256-ZofvvoatURsk1p5JvqajdrcO9iIT7UHjV2cb7io4FK0=", + "owner": "flox", + "repo": "nixpkgs-flox", + "rev": "8112f3ce4a227cd5088165d929761d877eea6709", + "type": "github" + }, + "original": { + "host": "catalog.floxsdlc.com", + "owner": "flox", + "ref": "aarch64-linux", + "repo": "nixpkgs-flox", + "type": "github" + } + }, + "nixpkgs__flox__i686-linux": { + "flake": false, + "locked": { + "host": "catalog.floxsdlc.com", + "lastModified": 1680113793, + "narHash": "sha256-xbjWq4lOP+K2772K8kDF17+CPoVyalqXYWyoH3x8au8=", + "owner": 
"flox", + "repo": "nixpkgs-flox", + "rev": "c189975540c856ac025be398761788c118ff5733", + "type": "github" + }, + "original": { + "host": "catalog.floxsdlc.com", + "owner": "flox", + "ref": "i686-linux", + "repo": "nixpkgs-flox", + "type": "github" + } + }, + "nixpkgs__flox__x86_64-darwin": { + "flake": false, + "locked": { + "host": "catalog.floxsdlc.com", + "lastModified": 1680113786, + "narHash": "sha256-HBNnTigb0MxCM6j6XmIH/0BRlUp7Rpe1et13TBf7xfM=", + "owner": "flox", + "repo": "nixpkgs-flox", + "rev": "5ab94a52855a10ad1ac0edd7277654bd587faf3e", + "type": "github" + }, + "original": { + "host": "catalog.floxsdlc.com", + "owner": "flox", + "ref": "x86_64-darwin", + "repo": "nixpkgs-flox", + "type": "github" + } + }, + "nixpkgs__flox__x86_64-linux": { + "flake": false, + "locked": { + "host": "catalog.floxsdlc.com", + "lastModified": 1680113768, + "narHash": "sha256-YnzWsglWn0TAeg2xQaVg8UFgALVU0TYZ/PdKV32EEtk=", + "owner": "flox", + "repo": "nixpkgs-flox", + "rev": "50ecffffa7c51a92a7a6bb9eb105625ed8b8f18d", + "type": "github" + }, + "original": { + "host": "catalog.floxsdlc.com", + "owner": "flox", + "ref": "x86_64-linux", + "repo": "nixpkgs-flox", + "type": "github" + } + }, + "root": { + "inputs": { + "flox-floxpkgs": "flox-floxpkgs" + } + }, + "shellHooks": { + "inputs": { + "flake-compat": "flake-compat", + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": "nixpkgs_2", + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1678976941, + "narHash": "sha256-skNr08frCwN9NO+7I77MjOHHAw+L410/37JknNld+W4=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "32b1dbedfd77892a6e375737ef04d8efba634e9e", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "tracelinks": { + "inputs": { + "flox-floxpkgs": [ + "flox-floxpkgs" + ] + }, + "locked": { + "lastModified": 1674847293, + "narHash": "sha256-wPirp+8gIUpoAgE8zoXZalAJzCzcdDHKLEPOapJUtfs=", + "ref": "main", + "rev": "46108503f52bc2fcc948abb9b00fc65a13e5f5bd", + "revCount": 9, + "type": "git", + "url": "ssh://git@github.com/flox/tracelinks" + }, + "original": { + "ref": "main", + "type": "git", + "url": "ssh://git@github.com/flox/tracelinks" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/flake.nix b/vendor/github.com/cloudflare/cloudflare-go/flake.nix new file mode 100644 index 0000000..3fd85db --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/flake.nix @@ -0,0 +1,7 @@ +{ + description = "A flox project"; + + inputs.flox-floxpkgs.url = "github:flox/floxpkgs"; + + outputs = args @ {flox-floxpkgs, ...}: flox-floxpkgs.project args (_: {}); +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/flox.nix b/vendor/github.com/cloudflare/cloudflare-go/flox.nix new file mode 100644 index 0000000..3f82311 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/flox.nix @@ -0,0 +1,3 @@ +{ + packages.nixpkgs-flox.go_1_20 = { }; +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/gateway_categories.go b/vendor/github.com/cloudflare/cloudflare-go/gateway_categories.go new file mode 100644 index 0000000..da23a74 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/gateway_categories.go @@ -0,0 +1,54 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// GatewayCategory represents a single gateway category. 
+type GatewayCategory struct { + Beta *bool `json:"beta,omitempty"` + Class string `json:"class"` + Description string `json:"description"` + ID int `json:"id"` + Name string `json:"name"` + Subcategories []GatewayCategory `json:"subcategories"` +} + +// GatewayCategoriesResponse represents the response from the list +// gateway categories endpoint. +type GatewayCategoriesResponse struct { + Success bool `json:"success"` + Result []GatewayCategory `json:"result"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + ResultInfo ResultInfo `json:"result_info"` +} + +// ListGatewayCategoriesParams represents the parameters for listing gateway categories. +type ListGatewayCategoriesParams struct { + ResultInfo +} + +// ListGatewayCategories returns all gateway categories within an account. +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-gateway-categories-list-categories +func (api *API) ListGatewayCategories(ctx context.Context, rc *ResourceContainer, params ListGatewayCategoriesParams) ([]GatewayCategory, ResultInfo, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/categories", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []GatewayCategory{}, ResultInfo{}, err + } + + var gResponse GatewayCategoriesResponse + err = json.Unmarshal(res, &gResponse) + if err != nil { + return []GatewayCategory{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return gResponse.Result, gResponse.ResultInfo, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/healthchecks.go b/vendor/github.com/cloudflare/cloudflare-go/healthchecks.go new file mode 100644 index 0000000..cd5acd1 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/healthchecks.go @@ -0,0 +1,199 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// Healthcheck describes a Healthcheck object. +type Healthcheck struct { + ID string `json:"id,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + Suspended bool `json:"suspended"` + Address string `json:"address"` + Retries int `json:"retries,omitempty"` + Timeout int `json:"timeout,omitempty"` + Interval int `json:"interval,omitempty"` + ConsecutiveSuccesses int `json:"consecutive_successes,omitempty"` + ConsecutiveFails int `json:"consecutive_fails,omitempty"` + Type string `json:"type,omitempty"` + CheckRegions []string `json:"check_regions"` + HTTPConfig *HealthcheckHTTPConfig `json:"http_config,omitempty"` + TCPConfig *HealthcheckTCPConfig `json:"tcp_config,omitempty"` + Status string `json:"status"` + FailureReason string `json:"failure_reason"` +} + +// HealthcheckHTTPConfig describes configuration for a HTTP healthcheck. +type HealthcheckHTTPConfig struct { + Method string `json:"method"` + Port uint16 `json:"port,omitempty"` + Path string `json:"path"` + ExpectedCodes []string `json:"expected_codes"` + ExpectedBody string `json:"expected_body"` + FollowRedirects bool `json:"follow_redirects"` + AllowInsecure bool `json:"allow_insecure"` + Header map[string][]string `json:"header"` +} + +// HealthcheckTCPConfig describes configuration for a TCP healthcheck. 
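+// A TCP health check might be created by pairing this config with CreateHealthcheck
+// from this file. A hedged sketch; it assumes an initialized client api, a context
+// ctx and a zone ID zoneID, and the field values (name, address, type, method, port)
+// are illustrative rather than a verified configuration:
+//
+//	hc, err := api.CreateHealthcheck(ctx, zoneID, cloudflare.Healthcheck{
+//		Name:      "origin-tcp",
+//		Address:   "origin.example.com",
+//		Type:      "TCP",
+//		TCPConfig: &cloudflare.HealthcheckTCPConfig{Method: "connection_established", Port: 443},
+//	})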
+type HealthcheckTCPConfig struct { + Method string `json:"method"` + Port uint16 `json:"port,omitempty"` +} + +// HealthcheckListResponse is the API response, containing an array of healthchecks. +type HealthcheckListResponse struct { + Response + Result []Healthcheck `json:"result"` + ResultInfo `json:"result_info"` +} + +// HealthcheckResponse is the API response, containing a single healthcheck. +type HealthcheckResponse struct { + Response + Result Healthcheck `json:"result"` +} + +// Healthchecks returns all healthchecks for a zone. +// +// API reference: https://api.cloudflare.com/#health-checks-list-health-checks +func (api *API) Healthchecks(ctx context.Context, zoneID string) ([]Healthcheck, error) { + uri := fmt.Sprintf("/zones/%s/healthchecks", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Healthcheck{}, err + } + var r HealthcheckListResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []Healthcheck{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// Healthcheck returns a single healthcheck by ID. +// +// API reference: https://api.cloudflare.com/#health-checks-health-check-details +func (api *API) Healthcheck(ctx context.Context, zoneID, healthcheckID string) (Healthcheck, error) { + uri := fmt.Sprintf("/zones/%s/healthchecks/%s", zoneID, healthcheckID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Healthcheck{}, err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Healthcheck{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateHealthcheck creates a new healthcheck in a zone. +// +// API reference: https://api.cloudflare.com/#health-checks-create-health-check +func (api *API) CreateHealthcheck(ctx context.Context, zoneID string, healthcheck Healthcheck) (Healthcheck, error) { + uri := fmt.Sprintf("/zones/%s/healthchecks", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, healthcheck) + if err != nil { + return Healthcheck{}, err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Healthcheck{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateHealthcheck updates an existing healthcheck. +// +// API reference: https://api.cloudflare.com/#health-checks-update-health-check +func (api *API) UpdateHealthcheck(ctx context.Context, zoneID string, healthcheckID string, healthcheck Healthcheck) (Healthcheck, error) { + uri := fmt.Sprintf("/zones/%s/healthchecks/%s", zoneID, healthcheckID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, healthcheck) + if err != nil { + return Healthcheck{}, err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Healthcheck{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteHealthcheck deletes a healthcheck in a zone. 
+// +// API reference: https://api.cloudflare.com/#health-checks-delete-health-check +func (api *API) DeleteHealthcheck(ctx context.Context, zoneID string, healthcheckID string) error { + uri := fmt.Sprintf("/zones/%s/healthchecks/%s", zoneID, healthcheckID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// CreateHealthcheckPreview creates a new preview of a healthcheck in a zone. +// +// API reference: https://api.cloudflare.com/#health-checks-create-preview-health-check +func (api *API) CreateHealthcheckPreview(ctx context.Context, zoneID string, healthcheck Healthcheck) (Healthcheck, error) { + uri := fmt.Sprintf("/zones/%s/healthchecks/preview", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, healthcheck) + if err != nil { + return Healthcheck{}, err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Healthcheck{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// HealthcheckPreview returns a single healthcheck preview by its ID. +// +// API reference: https://api.cloudflare.com/#health-checks-health-check-preview-details +func (api *API) HealthcheckPreview(ctx context.Context, zoneID, id string) (Healthcheck, error) { + uri := fmt.Sprintf("/zones/%s/healthchecks/preview/%s", zoneID, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Healthcheck{}, err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Healthcheck{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteHealthcheckPreview deletes a healthcheck preview in a zone if it exists. 
+// +// API reference: https://api.cloudflare.com/#health-checks-delete-preview-health-check +func (api *API) DeleteHealthcheckPreview(ctx context.Context, zoneID string, id string) error { + uri := fmt.Sprintf("/zones/%s/healthchecks/preview/%s", zoneID, id) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r HealthcheckResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/hyperdrive.go b/vendor/github.com/cloudflare/cloudflare-go/hyperdrive.go new file mode 100644 index 0000000..bd1912e --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/hyperdrive.go @@ -0,0 +1,222 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingHyperdriveConfigID = errors.New("required hyperdrive config id is missing") + ErrMissingHyperdriveConfigName = errors.New("required hyperdrive config name is missing") + ErrMissingHyperdriveConfigOriginDatabase = errors.New("required hyperdrive config origin database is missing") + ErrMissingHyperdriveConfigOriginPassword = errors.New("required hyperdrive config origin password is missing") + ErrMissingHyperdriveConfigOriginHost = errors.New("required hyperdrive config origin host is missing") + ErrMissingHyperdriveConfigOriginScheme = errors.New("required hyperdrive config origin scheme is missing") + ErrMissingHyperdriveConfigOriginUser = errors.New("required hyperdrive config origin user is missing") +) + +type HyperdriveConfig struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Origin HyperdriveConfigOrigin `json:"origin,omitempty"` + Caching HyperdriveConfigCaching `json:"caching,omitempty"` +} + +type HyperdriveOriginType string + +type HyperdriveConfigOrigin struct { + Database string `json:"database,omitempty"` + Host string `json:"host,omitempty"` + Port int `json:"port,omitempty"` + Scheme string `json:"scheme,omitempty"` + User string `json:"user,omitempty"` + AccessClientID string `json:"access_client_id,omitempty"` +} + +type HyperdriveConfigOriginWithSecrets struct { + HyperdriveConfigOrigin + Password string `json:"password"` + AccessClientSecret string `json:"access_client_secret,omitempty"` +} + +type HyperdriveConfigCaching struct { + Disabled *bool `json:"disabled,omitempty"` + MaxAge int `json:"max_age,omitempty"` + StaleWhileRevalidate int `json:"stale_while_revalidate,omitempty"` +} + +type HyperdriveConfigListResponse struct { + Response + Result []HyperdriveConfig `json:"result"` +} + +type CreateHyperdriveConfigParams struct { + Name string `json:"name"` + Origin HyperdriveConfigOriginWithSecrets `json:"origin"` + Caching HyperdriveConfigCaching `json:"caching,omitempty"` +} + +type HyperdriveConfigResponse struct { + Response + Result HyperdriveConfig `json:"result"` +} + +type UpdateHyperdriveConfigParams struct { + HyperdriveID string `json:"-"` + Name string `json:"name"` + Origin HyperdriveConfigOriginWithSecrets `json:"origin"` + Caching HyperdriveConfigCaching `json:"caching,omitempty"` +} + +type ListHyperdriveConfigParams struct{} + +// ListHyperdriveConfigs returns the Hyperdrive configs owned by an account. 
+// +// API reference: https://developers.cloudflare.com/api/operations/list-hyperdrive +func (api *API) ListHyperdriveConfigs(ctx context.Context, rc *ResourceContainer, params ListHyperdriveConfigParams) ([]HyperdriveConfig, error) { + if rc.Identifier == "" { + return []HyperdriveConfig{}, ErrMissingAccountID + } + + hResponse := HyperdriveConfigListResponse{} + uri := fmt.Sprintf("/accounts/%s/hyperdrive/configs", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []HyperdriveConfig{}, err + } + + err = json.Unmarshal(res, &hResponse) + if err != nil { + return []HyperdriveConfig{}, fmt.Errorf("failed to unmarshal filters JSON data: %w", err) + } + + return hResponse.Result, nil +} + +// CreateHyperdriveConfig creates a new Hyperdrive config. +// +// API reference: https://developers.cloudflare.com/api/operations/create-hyperdrive +func (api *API) CreateHyperdriveConfig(ctx context.Context, rc *ResourceContainer, params CreateHyperdriveConfigParams) (HyperdriveConfig, error) { + if rc.Identifier == "" { + return HyperdriveConfig{}, ErrMissingAccountID + } + + if params.Name == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigName + } + + uri := fmt.Sprintf("/accounts/%s/hyperdrive/configs", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return HyperdriveConfig{}, err + } + + var r HyperdriveConfigResponse + err = json.Unmarshal(res, &r) + if err != nil { + return HyperdriveConfig{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteHyperdriveConfig deletes a Hyperdrive config. +// +// API reference: https://developers.cloudflare.com/api/operations/delete-hyperdrive +func (api *API) DeleteHyperdriveConfig(ctx context.Context, rc *ResourceContainer, hyperdriveID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + if hyperdriveID == "" { + return ErrMissingHyperdriveConfigID + } + + uri := fmt.Sprintf("/accounts/%s/hyperdrive/configs/%s", rc.Identifier, hyperdriveID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} + +// GetHyperdriveConfig returns a single Hyperdrive config based on the ID. +// +// API reference: https://developers.cloudflare.com/api/operations/get-hyperdrive +func (api *API) GetHyperdriveConfig(ctx context.Context, rc *ResourceContainer, hyperdriveID string) (HyperdriveConfig, error) { + if rc.Identifier == "" { + return HyperdriveConfig{}, ErrMissingAccountID + } + + if hyperdriveID == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigID + } + + uri := fmt.Sprintf("/accounts/%s/hyperdrive/configs/%s", rc.Identifier, hyperdriveID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return HyperdriveConfig{}, err + } + + var r HyperdriveConfigResponse + err = json.Unmarshal(res, &r) + if err != nil { + return HyperdriveConfig{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateHyperdriveConfig updates a Hyperdrive config. 
+// +// API reference: https://developers.cloudflare.com/api/operations/update-hyperdrive +func (api *API) UpdateHyperdriveConfig(ctx context.Context, rc *ResourceContainer, params UpdateHyperdriveConfigParams) (HyperdriveConfig, error) { + if rc.Identifier == "" { + return HyperdriveConfig{}, ErrMissingAccountID + } + + if params.HyperdriveID == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigID + } + + if params.Origin.Database == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigOriginDatabase + } + + if params.Origin.Password == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigOriginPassword + } + + if params.Origin.Host == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigOriginHost + } + + if params.Origin.Scheme == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigOriginScheme + } + + if params.Origin.User == "" { + return HyperdriveConfig{}, ErrMissingHyperdriveConfigOriginUser + } + + uri := fmt.Sprintf("/accounts/%s/hyperdrive/configs/%s", rc.Identifier, params.HyperdriveID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return HyperdriveConfig{}, err + } + + var r HyperdriveConfigResponse + err = json.Unmarshal(res, &r) + if err != nil { + return HyperdriveConfig{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/images.go b/vendor/github.com/cloudflare/cloudflare-go/images.go new file mode 100644 index 0000000..35208ea --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/images.go @@ -0,0 +1,401 @@ +package cloudflare + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrInvalidImagesAPIVersion = errors.New("invalid images API version") + ErrMissingImageID = errors.New("required image ID missing") +) + +type ImagesAPIVersion string + +const ( + ImagesAPIVersionV1 ImagesAPIVersion = "v1" + ImagesAPIVersionV2 ImagesAPIVersion = "v2" +) + +// Image represents a Cloudflare Image. +type Image struct { + ID string `json:"id"` + Filename string `json:"filename"` + Meta map[string]interface{} `json:"meta,omitempty"` + RequireSignedURLs bool `json:"requireSignedURLs"` + Variants []string `json:"variants"` + Uploaded time.Time `json:"uploaded"` +} + +// UploadImageParams is the data required for an Image Upload request. +type UploadImageParams struct { + File io.ReadCloser + URL string + Name string + RequireSignedURLs bool + Metadata map[string]interface{} +} + +// write writes the image upload data to a multipart writer, so +// it can be used in an HTTP request. +func (b UploadImageParams) write(mpw *multipart.Writer) error { + if b.File == nil && b.URL == "" { + return errors.New("a file or url to upload must be specified") + } + + if b.File != nil { + name := b.Name + part, err := mpw.CreateFormFile("file", name) + if err != nil { + return err + } + _, err = io.Copy(part, b.File) + if err != nil { + _ = b.File.Close() + return err + } + _ = b.File.Close() + } + + if b.URL != "" { + err := mpw.WriteField("url", b.URL) + if err != nil { + return err + } + } + + // According to the Cloudflare docs, this field defaults to false. + // For simplicity, we will only send it if the value is true, however + // if the default is changed to true, this logic will need to be updated. 
+ if b.RequireSignedURLs { + err := mpw.WriteField("requireSignedURLs", "true") + if err != nil { + return err + } + } + + if b.Metadata != nil { + part, err := mpw.CreateFormField("metadata") + if err != nil { + return err + } + err = json.NewEncoder(part).Encode(b.Metadata) + if err != nil { + return err + } + } + + return nil +} + +// UpdateImageParams is the data required for an UpdateImage request. +type UpdateImageParams struct { + ID string `json:"-"` + RequireSignedURLs bool `json:"requireSignedURLs"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// CreateImageDirectUploadURLParams is the data required for a CreateImageDirectUploadURL request. +type CreateImageDirectUploadURLParams struct { + Version ImagesAPIVersion `json:"-"` + Expiry *time.Time `json:"expiry,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + RequireSignedURLs *bool `json:"requireSignedURLs,omitempty"` +} + +// ImageDirectUploadURLResponse is the API response for a direct image upload url. +type ImageDirectUploadURLResponse struct { + Result ImageDirectUploadURL `json:"result"` + Response +} + +// ImageDirectUploadURL . +type ImageDirectUploadURL struct { + ID string `json:"id"` + UploadURL string `json:"uploadURL"` +} + +// ImagesListResponse is the API response for listing all images. +type ImagesListResponse struct { + Result struct { + Images []Image `json:"images"` + } `json:"result"` + Response +} + +// ImageDetailsResponse is the API response for getting an image's details. +type ImageDetailsResponse struct { + Result Image `json:"result"` + Response +} + +// ImagesStatsResponse is the API response for image stats. +type ImagesStatsResponse struct { + Result struct { + Count ImagesStatsCount `json:"count"` + } `json:"result"` + Response +} + +// ImagesStatsCount is the stats attached to a ImagesStatsResponse. +type ImagesStatsCount struct { + Current int64 `json:"current"` + Allowed int64 `json:"allowed"` +} + +type ListImagesParams struct { + ResultInfo +} + +// UploadImage uploads a single image. +// +// API Reference: https://api.cloudflare.com/#cloudflare-images-upload-an-image-using-a-single-http-request +func (api *API) UploadImage(ctx context.Context, rc *ResourceContainer, params UploadImageParams) (Image, error) { + if rc.Level != AccountRouteLevel { + return Image{}, ErrRequiredAccountLevelResourceContainer + } + + if params.File != nil && params.URL != "" { + return Image{}, errors.New("file and url uploads are mutually exclusive and can only be performed individually") + } + + uri := fmt.Sprintf("/accounts/%s/images/v1", rc.Identifier) + + body := &bytes.Buffer{} + w := multipart.NewWriter(body) + if err := params.write(w); err != nil { + _ = w.Close() + return Image{}, fmt.Errorf("error writing multipart body: %w", err) + } + _ = w.Close() + + res, err := api.makeRequestContextWithHeaders( + ctx, + http.MethodPost, + uri, + body, + http.Header{ + "Accept": []string{"application/json"}, + "Content-Type": []string{w.FormDataContentType()}, + }, + ) + if err != nil { + return Image{}, err + } + + var imageDetailsResponse ImageDetailsResponse + err = json.Unmarshal(res, &imageDetailsResponse) + if err != nil { + return Image{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return imageDetailsResponse.Result, nil +} + +// UpdateImage updates an existing image's metadata. 
+// +// API Reference: https://api.cloudflare.com/#cloudflare-images-update-image +func (api *API) UpdateImage(ctx context.Context, rc *ResourceContainer, params UpdateImageParams) (Image, error) { + if rc.Level != AccountRouteLevel { + return Image{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/images/v1/%s", rc.Identifier, params.ID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return Image{}, err + } + + var imageDetailsResponse ImageDetailsResponse + err = json.Unmarshal(res, &imageDetailsResponse) + if err != nil { + return Image{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return imageDetailsResponse.Result, nil +} + +var imagesMultipartBoundary = "----CloudflareImagesGoClientBoundary" + +// CreateImageDirectUploadURL creates an authenticated direct upload url. +// +// API Reference: https://api.cloudflare.com/#cloudflare-images-create-authenticated-direct-upload-url +func (api *API) CreateImageDirectUploadURL(ctx context.Context, rc *ResourceContainer, params CreateImageDirectUploadURLParams) (ImageDirectUploadURL, error) { + if rc.Level != AccountRouteLevel { + return ImageDirectUploadURL{}, ErrRequiredAccountLevelResourceContainer + } + + if params.Version != "" && params.Version != ImagesAPIVersionV1 && params.Version != ImagesAPIVersionV2 { + return ImageDirectUploadURL{}, ErrInvalidImagesAPIVersion + } + + var err error + var uri string + var res []byte + switch params.Version { + case ImagesAPIVersionV2: + uri = fmt.Sprintf("/%s/%s/images/%s/direct_upload", rc.Level, rc.Identifier, params.Version) + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + if err := writer.SetBoundary(imagesMultipartBoundary); err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("error setting multipart boundary") + } + + if *params.RequireSignedURLs { + if err = writer.WriteField("requireSignedURLs", "true"); err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("error writing requireSignedURLs field: %w", err) + } + } + if !params.Expiry.IsZero() { + if err = writer.WriteField("expiry", params.Expiry.Format(time.RFC3339)); err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("error writing expiry field: %w", err) + } + } + if params.Metadata != nil { + var metadataBytes []byte + if metadataBytes, err = json.Marshal(params.Metadata); err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("error marshalling metadata to JSON: %w", err) + } + if err = writer.WriteField("metadata", string(metadataBytes)); err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("error writing metadata field: %w", err) + } + } + if err = writer.Close(); err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("error closing multipart writer: %w", err) + } + + res, err = api.makeRequestContextWithHeaders( + ctx, + http.MethodPost, + uri, + body, + http.Header{ + "Accept": []string{"application/json"}, + "Content-Type": []string{writer.FormDataContentType()}, + }, + ) + case ImagesAPIVersionV1: + case "": + uri = fmt.Sprintf("/%s/%s/images/%s/direct_upload", rc.Level, rc.Identifier, ImagesAPIVersionV1) + res, err = api.makeRequestContext(ctx, http.MethodPost, uri, params) + default: + return ImageDirectUploadURL{}, ErrInvalidImagesAPIVersion + } + + if err != nil { + return ImageDirectUploadURL{}, err + } + + var imageDirectUploadURLResponse ImageDirectUploadURLResponse + err = json.Unmarshal(res, &imageDirectUploadURLResponse) + if err != nil { + return ImageDirectUploadURL{}, fmt.Errorf("%s: %w", 
errUnmarshalError, err) + } + return imageDirectUploadURLResponse.Result, nil +} + +// ListImages lists all images. +// +// API Reference: https://api.cloudflare.com/#cloudflare-images-list-images +func (api *API) ListImages(ctx context.Context, rc *ResourceContainer, params ListImagesParams) ([]Image, error) { + uri := buildURI(fmt.Sprintf("/accounts/%s/images/v1", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Image{}, err + } + + var imagesListResponse ImagesListResponse + err = json.Unmarshal(res, &imagesListResponse) + if err != nil { + return []Image{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return imagesListResponse.Result.Images, nil +} + +// GetImage gets the details of an uploaded image. +// +// API Reference: https://api.cloudflare.com/#cloudflare-images-image-details +func (api *API) GetImage(ctx context.Context, rc *ResourceContainer, id string) (Image, error) { + if rc.Level != AccountRouteLevel { + return Image{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/images/v1/%s", rc.Identifier, id) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Image{}, err + } + + var imageDetailsResponse ImageDetailsResponse + err = json.Unmarshal(res, &imageDetailsResponse) + if err != nil { + return Image{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return imageDetailsResponse.Result, nil +} + +// GetBaseImage gets the base image used to derive variants. +// +// API Reference: https://api.cloudflare.com/#cloudflare-images-base-image +func (api *API) GetBaseImage(ctx context.Context, rc *ResourceContainer, id string) ([]byte, error) { + if rc.Level != AccountRouteLevel { + return []byte{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/images/v1/%s/blob", rc.Identifier, id) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + return res, nil +} + +// DeleteImage deletes an image. +// +// API Reference: https://api.cloudflare.com/#cloudflare-images-delete-image +func (api *API) DeleteImage(ctx context.Context, rc *ResourceContainer, id string) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/images/v1/%s", rc.Identifier, id) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + return nil +} + +// GetImagesStats gets an account's statistics for Cloudflare Images. 
+// +// API Reference: https://api.cloudflare.com/#cloudflare-images-images-usage-statistics +func (api *API) GetImagesStats(ctx context.Context, rc *ResourceContainer) (ImagesStatsCount, error) { + if rc.Level != AccountRouteLevel { + return ImagesStatsCount{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/images/v1/stats", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ImagesStatsCount{}, err + } + + var imagesStatsResponse ImagesStatsResponse + err = json.Unmarshal(res, &imagesStatsResponse) + if err != nil { + return ImagesStatsCount{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return imagesStatsResponse.Result.Count, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/images_variants.go b/vendor/github.com/cloudflare/cloudflare-go/images_variants.go new file mode 100644 index 0000000..2949551 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/images_variants.go @@ -0,0 +1,163 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type ImagesVariant struct { + ID string `json:"id,omitempty"` + NeverRequireSignedURLs *bool `json:"neverRequireSignedURLs,omitempty"` + Options ImagesVariantsOptions `json:"options,omitempty"` +} + +type ImagesVariantsOptions struct { + Fit string `json:"fit,omitempty"` + Height int `json:"height,omitempty"` + Metadata string `json:"metadata,omitempty"` + Width int `json:"width,omitempty"` +} + +type ListImageVariantsParams struct{} + +type ListImagesVariantsResponse struct { + Result ListImageVariantsResult `json:"result,omitempty"` + Response +} + +type ListImageVariantsResult struct { + ImagesVariants map[string]ImagesVariant `json:"variants,omitempty"` +} + +type CreateImagesVariantParams struct { + ID string `json:"id,omitempty"` + NeverRequireSignedURLs *bool `json:"neverRequireSignedURLs,omitempty"` + Options ImagesVariantsOptions `json:"options,omitempty"` +} + +type UpdateImagesVariantParams struct { + ID string `json:"-"` + NeverRequireSignedURLs *bool `json:"neverRequireSignedURLs,omitempty"` + Options ImagesVariantsOptions `json:"options,omitempty"` +} + +type ImagesVariantResult struct { + Variant ImagesVariant `json:"variant,omitempty"` +} + +type ImagesVariantResponse struct { + Result ImagesVariantResult `json:"result,omitempty"` + Response +} + +// Lists existing variants. +// +// API Reference: https://developers.cloudflare.com/api/operations/cloudflare-images-variants-list-variants +func (api *API) ListImagesVariants(ctx context.Context, rc *ResourceContainer, params ListImageVariantsParams) (ListImageVariantsResult, error) { + if rc.Identifier == "" { + return ListImageVariantsResult{}, ErrMissingAccountID + } + + baseURL := fmt.Sprintf("/accounts/%s/images/v1/variants", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return ListImageVariantsResult{}, err + } + + var listImageVariantsResponse ListImagesVariantsResponse + err = json.Unmarshal(res, &listImageVariantsResponse) + if err != nil { + return ListImageVariantsResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return listImageVariantsResponse.Result, nil +} + +// Fetch details for a single variant. 
+// +// API Reference: https://developers.cloudflare.com/api/operations/cloudflare-images-variants-variant-details +func (api *API) GetImagesVariant(ctx context.Context, rc *ResourceContainer, variantID string) (ImagesVariant, error) { + if rc.Identifier == "" { + return ImagesVariant{}, ErrMissingAccountID + } + + baseURL := fmt.Sprintf("/accounts/%s/images/v1/variants/%s", rc.Identifier, variantID) + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return ImagesVariant{}, err + } + + var imagesVariantDetailResponse ImagesVariantResponse + err = json.Unmarshal(res, &imagesVariantDetailResponse) + if err != nil { + return ImagesVariant{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return imagesVariantDetailResponse.Result.Variant, nil +} + +// Specify variants that allow you to resize images for different use cases. +// +// API Reference: https://developers.cloudflare.com/api/operations/cloudflare-images-variants-create-a-variant +func (api *API) CreateImagesVariant(ctx context.Context, rc *ResourceContainer, params CreateImagesVariantParams) (ImagesVariant, error) { + if rc.Identifier == "" { + return ImagesVariant{}, ErrMissingAccountID + } + + baseURL := fmt.Sprintf("/accounts/%s/images/v1/variants", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, baseURL, params) + if err != nil { + return ImagesVariant{}, err + } + + var createImagesVariantResponse ImagesVariantResponse + err = json.Unmarshal(res, &createImagesVariantResponse) + if err != nil { + return ImagesVariant{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return createImagesVariantResponse.Result.Variant, nil +} + +// Deleting a variant purges the cache for all images associated with the variant. +// +// API Reference: https://developers.cloudflare.com/api/operations/cloudflare-images-variants-variant-details +func (api *API) DeleteImagesVariant(ctx context.Context, rc *ResourceContainer, variantID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + baseURL := fmt.Sprintf("/accounts/%s/images/v1/variants/%s", rc.Identifier, variantID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, baseURL, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} + +// Updating a variant purges the cache for all images associated with the variant. 
+// +// API Reference: https://developers.cloudflare.com/api/operations/cloudflare-images-variants-variant-details +func (api *API) UpdateImagesVariant(ctx context.Context, rc *ResourceContainer, params UpdateImagesVariantParams) (ImagesVariant, error) { + if rc.Identifier == "" { + return ImagesVariant{}, ErrMissingAccountID + } + + baseURL := fmt.Sprintf("/accounts/%s/images/v1/variants/%s", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, baseURL, params) + if err != nil { + return ImagesVariant{}, err + } + + var imagesVariantDetailResponse ImagesVariantResponse + err = json.Unmarshal(res, &imagesVariantDetailResponse) + if err != nil { + return ImagesVariant{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return imagesVariantDetailResponse.Result.Variant, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/intelligence_asn.go b/vendor/github.com/cloudflare/cloudflare-go/intelligence_asn.go new file mode 100644 index 0000000..f66cdff --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/intelligence_asn.go @@ -0,0 +1,101 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// ErrMissingASN is for when ASN is required but not set. +var ErrMissingASN = errors.New("required asn missing") + +// ASNInfo represents ASN information. +type ASNInfo struct { + ASN int `json:"asn"` + Description string `json:"description"` + Country string `json:"country"` + Type string `json:"type"` + DomainCount int `json:"domain_count"` + TopDomains []string `json:"top_domains"` +} + +// IntelligenceASNOverviewParameters represents parameters for an ASN request. +type IntelligenceASNOverviewParameters struct { + AccountID string + ASN int +} + +// IntelligenceASNResponse represents an API response for ASN info. +type IntelligenceASNResponse struct { + Response + Result []ASNInfo `json:"result,omitempty"` +} + +// IntelligenceASNSubnetsParameters represents parameters for an ASN subnet request. +type IntelligenceASNSubnetsParameters struct { + AccountID string + ASN int +} + +// IntelligenceASNSubnetResponse represents an ASN subnet API response. 
+type IntelligenceASNSubnetResponse struct { + ASN int `json:"asn,omitempty"` + IPCountTotal int `json:"ip_count_total,omitempty"` + Subnets []string `json:"subnets,omitempty"` + Count int `json:"count,omitempty"` + Page int `json:"page,omitempty"` + PerPage int `json:"per_page,omitempty"` +} + +// IntelligenceASNOverview get overview for an ASN number +// +// API Reference: https://api.cloudflare.com/#asn-intelligence-get-asn-overview +func (api *API) IntelligenceASNOverview(ctx context.Context, params IntelligenceASNOverviewParameters) ([]ASNInfo, error) { + if params.AccountID == "" { + return []ASNInfo{}, ErrMissingAccountID + } + + if params.ASN == 0 { + return []ASNInfo{}, ErrMissingASN + } + + uri := fmt.Sprintf("/accounts/%s/intel/asn/%d", params.AccountID, params.ASN) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []ASNInfo{}, err + } + + var asnInfoResponse IntelligenceASNResponse + if err := json.Unmarshal(res, &asnInfoResponse); err != nil { + return []ASNInfo{}, err + } + return asnInfoResponse.Result, nil +} + +// IntelligenceASNSubnets gets all subnets of an ASN +// +// API Reference: https://api.cloudflare.com/#asn-intelligence-get-asn-subnets +func (api *API) IntelligenceASNSubnets(ctx context.Context, params IntelligenceASNSubnetsParameters) (IntelligenceASNSubnetResponse, error) { + if params.AccountID == "" { + return IntelligenceASNSubnetResponse{}, ErrMissingAccountID + } + + if params.ASN == 0 { + return IntelligenceASNSubnetResponse{}, ErrMissingASN + } + + uri := fmt.Sprintf("/accounts/%s/intel/asn/%d/subnets", params.AccountID, params.ASN) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return IntelligenceASNSubnetResponse{}, err + } + + var intelligenceASNSubnetResponse IntelligenceASNSubnetResponse + if err := json.Unmarshal(res, &intelligenceASNSubnetResponse); err != nil { + return IntelligenceASNSubnetResponse{}, err + } + return intelligenceASNSubnetResponse, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/intelligence_domain.go b/vendor/github.com/cloudflare/cloudflare-go/intelligence_domain.go new file mode 100644 index 0000000..b12b2c8 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/intelligence_domain.go @@ -0,0 +1,177 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// ErrMissingDomain is for when domain is needed but not given. +var ErrMissingDomain = errors.New("required domain missing") + +// DomainDetails represents details for a domain. +type DomainDetails struct { + Domain string `json:"domain"` + ResolvesToRefs []ResolvesToRefs `json:"resolves_to_refs"` + PopularityRank int `json:"popularity_rank"` + Application Application `json:"application"` + RiskTypes []interface{} `json:"risk_types"` + ContentCategories []ContentCategories `json:"content_categories"` + AdditionalInformation AdditionalInformation `json:"additional_information"` +} + +// ResolvesToRefs what a domain resolves to. +type ResolvesToRefs struct { + ID string `json:"id"` + Value string `json:"value"` +} + +type Application struct { + ID int `json:"id"` + Name string `json:"name"` +} + +// ContentCategories represents the categories for a domain. +type ContentCategories struct { + ID int `json:"id"` + SuperCategoryID int `json:"super_category_id"` + Name string `json:"name"` +} + +// AdditionalInformation represents any additional information for a domain. 
+type AdditionalInformation struct { + SuspectedMalwareFamily string `json:"suspected_malware_family"` +} + +// DomainHistory represents the history for a domain. +type DomainHistory struct { + Domain string `json:"domain"` + Categorizations []Categorizations `json:"categorizations"` +} + +// Categories represents categories for a domain. +type Categories struct { + ID int `json:"id"` + Name string `json:"name"` +} + +// Categorizations represents the categories and when those categories were set. +type Categorizations struct { + Categories []Categories `json:"categories"` + Start string `json:"start"` + End string `json:"end"` +} + +// GetDomainDetailsParameters represent the parameters for a domain details request. +type GetDomainDetailsParameters struct { + AccountID string `url:"-"` + Domain string `url:"domain,omitempty"` +} + +// DomainDetailsResponse represents an API response for domain details. +type DomainDetailsResponse struct { + Response + Result DomainDetails `json:"result,omitempty"` +} + +// GetBulkDomainDetailsParameters represents the parameters for bulk domain details request. +type GetBulkDomainDetailsParameters struct { + AccountID string `url:"-"` + Domains []string `url:"domain"` +} + +// GetBulkDomainDetailsResponse represents an API response for bulk domain details. +type GetBulkDomainDetailsResponse struct { + Response + Result []DomainDetails `json:"result,omitempty"` +} + +// GetDomainHistoryParameters represents the parameters for domain history request. +type GetDomainHistoryParameters struct { + AccountID string `url:"-"` + Domain string `url:"domain,omitempty"` +} + +// GetDomainHistoryResponse represents an API response for domain history. +type GetDomainHistoryResponse struct { + Response + Result []DomainHistory `json:"result,omitempty"` +} + +// IntelligenceDomainDetails gets domain information. +// +// API Reference: https://api.cloudflare.com/#domain-intelligence-get-domain-details +func (api *API) IntelligenceDomainDetails(ctx context.Context, params GetDomainDetailsParameters) (DomainDetails, error) { + if params.AccountID == "" { + return DomainDetails{}, ErrMissingAccountID + } + + if params.Domain == "" { + return DomainDetails{}, ErrMissingDomain + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel/domain", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DomainDetails{}, err + } + + var domainDetails DomainDetailsResponse + if err := json.Unmarshal(res, &domainDetails); err != nil { + return DomainDetails{}, err + } + return domainDetails.Result, nil +} + +// IntelligenceBulkDomainDetails gets domain information for a list of domains. 
+// +// API Reference: https://api.cloudflare.com/#domain-intelligence-get-multiple-domain-details +func (api *API) IntelligenceBulkDomainDetails(ctx context.Context, params GetBulkDomainDetailsParameters) ([]DomainDetails, error) { + if params.AccountID == "" { + return []DomainDetails{}, ErrMissingAccountID + } + + if len(params.Domains) == 0 { + return []DomainDetails{}, ErrMissingDomain + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel/domain/bulk", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DomainDetails{}, err + } + + var domainDetails GetBulkDomainDetailsResponse + if err := json.Unmarshal(res, &domainDetails); err != nil { + return []DomainDetails{}, err + } + return domainDetails.Result, nil +} + +// IntelligenceDomainHistory get domain history for given domain +// +// API Reference: https://api.cloudflare.com/#domain-history-get-domain-history +func (api *API) IntelligenceDomainHistory(ctx context.Context, params GetDomainHistoryParameters) ([]DomainHistory, error) { + if params.AccountID == "" { + return []DomainHistory{}, ErrMissingAccountID + } + + if params.Domain == "" { + return []DomainHistory{}, ErrMissingDomain + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel/domain-history", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []DomainHistory{}, err + } + + var domainDetails GetDomainHistoryResponse + if err := json.Unmarshal(res, &domainDetails); err != nil { + return []DomainHistory{}, err + } + return domainDetails.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/intelligence_ip.go b/vendor/github.com/cloudflare/cloudflare-go/intelligence_ip.go new file mode 100644 index 0000000..47c74b3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/intelligence_ip.go @@ -0,0 +1,156 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// IPIntelligence represents IP intelligence information. +type IPIntelligence struct { + IP string `json:"ip"` + BelongsToRef BelongsToRef `json:"belongs_to_ref"` + RiskTypes []RiskTypes `json:"risk_types"` +} + +// BelongsToRef represents information about who owns an IP address. +type BelongsToRef struct { + ID string `json:"id"` + Value int `json:"value"` + Type string `json:"type"` + Country string `json:"country"` + Description string `json:"description"` +} + +// RiskTypes represent risk types for an IP. +type RiskTypes struct { + ID int `json:"id"` + SuperCategoryID int `json:"super_category_id"` + Name string `json:"name"` +} + +// IPPassiveDNS represent DNS response. +type IPPassiveDNS struct { + ReverseRecords []ReverseRecords `json:"reverse_records,omitempty"` + Count int `json:"count,omitempty"` + Page int `json:"page,omitempty"` + PerPage int `json:"per_page,omitempty"` +} + +// ReverseRecords represent records for passive DNS. +type ReverseRecords struct { + FirstSeen string `json:"first_seen,omitempty"` + LastSeen string `json:"last_seen,omitempty"` + Hostname string `json:"hostname,omitempty"` +} + +// IPIntelligenceParameters represents parameters for an IP Intelligence request. +type IPIntelligenceParameters struct { + AccountID string `url:"-"` + IPv4 string `url:"ipv4,omitempty"` + IPv6 string `url:"ipv6,omitempty"` +} + +// IPIntelligenceResponse represents an IP Intelligence API response. 
+type IPIntelligenceResponse struct { + Response + Result []IPIntelligence `json:"result,omitempty"` +} + +// IPIntelligenceListParameters represents the parameters for an IP list request. +type IPIntelligenceListParameters struct { + AccountID string +} + +// IPIntelligenceItem represents an item in an IP list. +type IPIntelligenceItem struct { + ID int `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +// IPIntelligenceListResponse represents the response for an IP list API response. +type IPIntelligenceListResponse struct { + Response + Result []IPIntelligenceItem `json:"result,omitempty"` +} + +// IPIntelligencePassiveDNSParameters represents the parameters for a passive DNS request. +type IPIntelligencePassiveDNSParameters struct { + AccountID string `url:"-"` + IPv4 string `url:"ipv4,omitempty"` + Start string `url:"start,omitempty"` + End string `url:"end,omitempty"` + Page int `url:"page,omitempty"` + PerPage int `url:"per_page,omitempty"` +} + +// IPIntelligencePassiveDNSResponse represents a passive API response. +type IPIntelligencePassiveDNSResponse struct { + Response + Result IPPassiveDNS `json:"result,omitempty"` +} + +// IntelligenceGetIPOverview gets information about ipv4 or ipv6 address. +// +// API Reference: https://api.cloudflare.com/#ip-intelligence-get-ip-overview +func (api *API) IntelligenceGetIPOverview(ctx context.Context, params IPIntelligenceParameters) ([]IPIntelligence, error) { + if params.AccountID == "" { + return []IPIntelligence{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel/ip", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []IPIntelligence{}, err + } + + var ipDetails IPIntelligenceResponse + if err := json.Unmarshal(res, &ipDetails); err != nil { + return []IPIntelligence{}, err + } + return ipDetails.Result, nil +} + +// IntelligenceGetIPList gets intelligence ip-lists. +// +// API Reference: https://api.cloudflare.com/#ip-list-get-ip-lists +func (api *API) IntelligenceGetIPList(ctx context.Context, params IPIntelligenceListParameters) ([]IPIntelligenceItem, error) { + if params.AccountID == "" { + return []IPIntelligenceItem{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/intel/ip-list", params.AccountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []IPIntelligenceItem{}, err + } + + var ipListItem IPIntelligenceListResponse + if err := json.Unmarshal(res, &ipListItem); err != nil { + return []IPIntelligenceItem{}, err + } + return ipListItem.Result, nil +} + +// IntelligencePassiveDNS gets a history of DNS for an ip. 
+// +// API Reference: https://api.cloudflare.com/#passive-dns-by-ip-get-passive-dns-by-ip +func (api *API) IntelligencePassiveDNS(ctx context.Context, params IPIntelligencePassiveDNSParameters) (IPPassiveDNS, error) { + if params.AccountID == "" { + return IPPassiveDNS{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel/dns", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return IPPassiveDNS{}, err + } + + var passiveDNS IPIntelligencePassiveDNSResponse + if err := json.Unmarshal(res, &passiveDNS); err != nil { + return IPPassiveDNS{}, err + } + return passiveDNS.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/intelligence_phishing.go b/vendor/github.com/cloudflare/cloudflare-go/intelligence_phishing.go new file mode 100644 index 0000000..ceb3f49 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/intelligence_phishing.go @@ -0,0 +1,52 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// PhishingScan represent information about a phishing scan. +type PhishingScan struct { + URL string `json:"url"` + Phishing bool `json:"phishing"` + Verified bool `json:"verified"` + Score float64 `json:"score"` + Classifier string `json:"classifier"` +} + +// PhishingScanParameters represent parameters for a phishing scan request. +type PhishingScanParameters struct { + AccountID string `url:"-"` + URL string `url:"url,omitempty"` + Skip bool `url:"skip,omitempty"` +} + +// PhishingScanResponse represent an API response for a phishing scan. +type PhishingScanResponse struct { + Response + Result PhishingScan `json:"result,omitempty"` +} + +// IntelligencePhishingScan scans a URL for suspected phishing +// +// API Reference: https://api.cloudflare.com/#phishing-url-scanner-scan-suspicious-url +func (api *API) IntelligencePhishingScan(ctx context.Context, params PhishingScanParameters) (PhishingScan, error) { + if params.AccountID == "" { + return PhishingScan{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel-phishing/predict", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PhishingScan{}, err + } + + var phishingScanResponse PhishingScanResponse + if err := json.Unmarshal(res, &phishingScanResponse); err != nil { + return PhishingScan{}, err + } + return phishingScanResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/intelligence_whois.go b/vendor/github.com/cloudflare/cloudflare-go/intelligence_whois.go new file mode 100644 index 0000000..72a3a61 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/intelligence_whois.go @@ -0,0 +1,60 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// WHOIS represents whois information. +type WHOIS struct { + Domain string `json:"domain,omitempty"` + CreatedDate string `json:"created_date,omitempty"` + UpdatedDate string `json:"updated_date,omitempty"` + Registrant string `json:"registrant,omitempty"` + RegistrantOrg string `json:"registrant_org,omitempty"` + RegistrantCountry string `json:"registrant_country,omitempty"` + RegistrantEmail string `json:"registrant_email,omitempty"` + Registrar string `json:"registrar,omitempty"` + Nameservers []string `json:"nameservers,omitempty"` +} + +// WHOISParameters represents parameters for a who is request. 
+type WHOISParameters struct { + AccountID string `url:"-"` + Domain string `url:"domain"` +} + +// WHOISResponse represents an API response for a whois request. +type WHOISResponse struct { + Response + Result WHOIS `json:"result,omitempty"` +} + +// IntelligenceWHOIS gets whois information for a domain. +// +// API Reference: https://api.cloudflare.com/#whois-record-get-whois-record +func (api *API) IntelligenceWHOIS(ctx context.Context, params WHOISParameters) (WHOIS, error) { + if params.AccountID == "" { + return WHOIS{}, ErrMissingAccountID + } + + if params.Domain == "" { + return WHOIS{}, ErrMissingDomain + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/intel/whois", params.AccountID), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WHOIS{}, err + } + + var whoisResponse WHOISResponse + if err := json.Unmarshal(res, &whoisResponse); err != nil { + return WHOIS{}, err + } + + return whoisResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/ip_access_rules.go b/vendor/github.com/cloudflare/cloudflare-go/ip_access_rules.go new file mode 100644 index 0000000..58d29cd --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/ip_access_rules.go @@ -0,0 +1,89 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type ListIPAccessRulesOrderOption string +type ListIPAccessRulesMatchOption string +type IPAccessRulesModeOption string + +const ( + IPAccessRulesConfigurationTarget ListIPAccessRulesOrderOption = "configuration.target" + IPAccessRulesConfigurationValue ListIPAccessRulesOrderOption = "configuration.value" + IPAccessRulesMatchOptionAll ListIPAccessRulesMatchOption = "all" + IPAccessRulesMatchOptionAny ListIPAccessRulesMatchOption = "any" + IPAccessRulesModeBlock IPAccessRulesModeOption = "block" + IPAccessRulesModeChallenge IPAccessRulesModeOption = "challenge" + IPAccessRulesModeJsChallenge IPAccessRulesModeOption = "js_challenge" + IPAccessRulesModeManagedChallenge IPAccessRulesModeOption = "managed_challenge" + IPAccessRulesModeWhitelist IPAccessRulesModeOption = "whitelist" +) + +type ListIPAccessRulesFilters struct { + Configuration IPAccessRuleConfiguration `json:"configuration,omitempty"` + Match ListIPAccessRulesMatchOption `json:"match,omitempty"` + Mode IPAccessRulesModeOption `json:"mode,omitempty"` + Notes string `json:"notes,omitempty"` +} + +type ListIPAccessRulesParams struct { + Direction string `url:"direction,omitempty"` + Filters ListIPAccessRulesFilters `url:"filters,omitempty"` + Order ListIPAccessRulesOrderOption `url:"order,omitempty"` + PaginationOptions +} + +type IPAccessRuleConfiguration struct { + Target string `json:"target,omitempty"` + Value string `json:"value,omitempty"` +} + +type IPAccessRule struct { + AllowedModes []IPAccessRulesModeOption `json:"allowed_modes"` + Configuration IPAccessRuleConfiguration `json:"configuration"` + CreatedOn string `json:"created_on"` + ID string `json:"id"` + Mode IPAccessRulesModeOption `json:"mode"` + ModifiedOn string `json:"modified_on"` + Notes string `json:"notes"` +} + +type ListIPAccessRulesResponse struct { + Result []IPAccessRule `json:"result"` + ResultInfo `json:"result_info"` + Response +} + +// ListIPAccessRules fetches IP Access rules of a zone/user/account. You can +// filter the results using several optional parameters. 
+// +// API references: +// - https://developers.cloudflare.com/api/operations/ip-access-rules-for-a-user-list-ip-access-rules +// - https://developers.cloudflare.com/api/operations/ip-access-rules-for-a-zone-list-ip-access-rules +// - https://developers.cloudflare.com/api/operations/ip-access-rules-for-an-account-list-ip-access-rules +func (api *API) ListIPAccessRules(ctx context.Context, rc *ResourceContainer, params ListIPAccessRulesParams) ([]IPAccessRule, *ResultInfo, error) { + if rc.Identifier == "" { + return []IPAccessRule{}, &ResultInfo{}, ErrMissingResourceIdentifier + } + + uri := buildURI(fmt.Sprintf("/%s/%s/firewall/access_rules/rules", rc.Level, rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []IPAccessRule{}, &ResultInfo{}, err + } + + result := ListIPAccessRulesResponse{} + + err = json.Unmarshal(res, &result) + if err != nil { + return []IPAccessRule{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, &result.ResultInfo, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/ip_list.go b/vendor/github.com/cloudflare/cloudflare-go/ip_list.go new file mode 100644 index 0000000..a9c5e71 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/ip_list.go @@ -0,0 +1,507 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// The definitions in this file are deprecated and should be removed after +// enough time is given for users to migrate. Use the more general `List` +// methods instead. + +const ( + // IPListTypeIP specifies a list containing IP addresses. + IPListTypeIP = "ip" +) + +// IPListBulkOperation contains information about a Bulk Operation. +type IPListBulkOperation struct { + ID string `json:"id"` + Status string `json:"status"` + Error string `json:"error"` + Completed *time.Time `json:"completed"` +} + +// IPList contains information about an IP List. +type IPList struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Kind string `json:"kind"` + NumItems int `json:"num_items"` + NumReferencingFilters int `json:"num_referencing_filters"` + CreatedOn *time.Time `json:"created_on"` + ModifiedOn *time.Time `json:"modified_on"` +} + +// IPListItem contains information about a single IP List Item. +type IPListItem struct { + ID string `json:"id"` + IP string `json:"ip"` + Comment string `json:"comment"` + CreatedOn *time.Time `json:"created_on"` + ModifiedOn *time.Time `json:"modified_on"` +} + +// IPListCreateRequest contains data for a new IP List. +type IPListCreateRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Kind string `json:"kind"` +} + +// IPListItemCreateRequest contains data for a new IP List Item. +type IPListItemCreateRequest struct { + IP string `json:"ip"` + Comment string `json:"comment"` +} + +// IPListItemDeleteRequest wraps IP List Items that shall be deleted. +type IPListItemDeleteRequest struct { + Items []IPListItemDeleteItemRequest `json:"items"` +} + +// IPListItemDeleteItemRequest contains single IP List Items that shall be deleted. +type IPListItemDeleteItemRequest struct { + ID string `json:"id"` +} + +// IPListUpdateRequest contains data for an IP List update. +type IPListUpdateRequest struct { + Description string `json:"description"` +} + +// IPListResponse contains a single IP List. 
+type IPListResponse struct { + Response + Result IPList `json:"result"` +} + +// IPListItemCreateResponse contains information about the creation of an IP List Item. +type IPListItemCreateResponse struct { + Response + Result struct { + OperationID string `json:"operation_id"` + } `json:"result"` +} + +// IPListListResponse contains a slice of IP Lists. +type IPListListResponse struct { + Response + Result []IPList `json:"result"` +} + +// IPListBulkOperationResponse contains information about a Bulk Operation. +type IPListBulkOperationResponse struct { + Response + Result IPListBulkOperation `json:"result"` +} + +// IPListDeleteResponse contains information about the deletion of an IP List. +type IPListDeleteResponse struct { + Response + Result struct { + ID string `json:"id"` + } `json:"result"` +} + +// IPListItemsListResponse contains information about IP List Items. +type IPListItemsListResponse struct { + Response + ResultInfo `json:"result_info"` + Result []IPListItem `json:"result"` +} + +// IPListItemDeleteResponse contains information about the deletion of an IP List Item. +type IPListItemDeleteResponse struct { + Response + Result struct { + OperationID string `json:"operation_id"` + } `json:"result"` +} + +// IPListItemsGetResponse contains information about a single IP List Item. +type IPListItemsGetResponse struct { + Response + Result IPListItem `json:"result"` +} + +// ListIPLists lists all IP Lists. +// +// API reference: https://api.cloudflare.com/#rules-lists-list-lists +// +// Deprecated: Use `ListLists` instead. +func (api *API) ListIPLists(ctx context.Context, accountID string) ([]IPList, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists", accountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []IPList{}, err + } + + result := IPListListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []IPList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// CreateIPList creates a new IP List. +// +// API reference: https://api.cloudflare.com/#rules-lists-create-list +// +// Deprecated: Use `CreateList` instead. +func (api *API) CreateIPList(ctx context.Context, accountID, name, description, kind string) (IPList, + error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists", accountID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, + IPListCreateRequest{Name: name, Description: description, Kind: kind}) + if err != nil { + return IPList{}, err + } + + result := IPListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetIPList returns a single IP List +// +// API reference: https://api.cloudflare.com/#rules-lists-get-list +// +// Deprecated: Use `GetList` instead. +func (api *API) GetIPList(ctx context.Context, accountID, ID string) (IPList, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return IPList{}, err + } + + result := IPListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdateIPList updates the description of an existing IP List. +// +// API reference: https://api.cloudflare.com/#rules-lists-update-list +// +// Deprecated: Use `UpdateList` instead. 
+func (api *API) UpdateIPList(ctx context.Context, accountID, ID, description string) (IPList, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, IPListUpdateRequest{Description: description}) + if err != nil { + return IPList{}, err + } + + result := IPListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteIPList deletes an IP List. +// +// API reference: https://api.cloudflare.com/#rules-lists-delete-list +// +// Deprecated: Use `DeleteList` instead. +func (api *API) DeleteIPList(ctx context.Context, accountID, ID string) (IPListDeleteResponse, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return IPListDeleteResponse{}, err + } + + result := IPListDeleteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListDeleteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// ListIPListItems returns a list with all items in an IP List. +// +// API reference: https://api.cloudflare.com/#rules-lists-list-list-items +// +// Deprecated: Use `ListListItems` instead. +func (api *API) ListIPListItems(ctx context.Context, accountID, ID string) ([]IPListItem, error) { + var list []IPListItem + var cursor string + var cursorQuery string + + for { + if len(cursor) > 0 { + cursorQuery = fmt.Sprintf("?cursor=%s", cursor) + } + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items%s", accountID, ID, cursorQuery) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []IPListItem{}, err + } + + result := IPListItemsListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []IPListItem{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + list = append(list, result.Result...) + if cursor = result.ResultInfo.Cursors.After; cursor == "" { + break + } + } + + return list, nil +} + +// CreateIPListItemAsync creates a new IP List Item asynchronously. Users have +// to poll the operation status by using the operation_id returned by this +// function. +// +// API reference: https://api.cloudflare.com/#rules-lists-create-list-items +// +// Deprecated: Use `CreateListItemAsync` instead. +func (api *API) CreateIPListItemAsync(ctx context.Context, accountID, ID, ip, comment string) (IPListItemCreateResponse, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, []IPListItemCreateRequest{{IP: ip, Comment: comment}}) + if err != nil { + return IPListItemCreateResponse{}, err + } + + result := IPListItemCreateResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListItemCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// CreateIPListItem creates a new IP List Item synchronously and returns the +// current set of IP List Items. +// +// Deprecated: Use `CreateListItem` instead. 
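The deprecated IP List helpers above take a bare account ID rather than a ResourceContainer. A minimal sketch, assuming a placeholder token and account ID; new code should prefer the List methods these doc comments point to.

package main

import (
	"context"
	"fmt"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	// Placeholder credentials and account ID.
	api, err := cloudflare.NewWithAPIToken("example-api-token")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	accountID := "0123456789abcdef0123456789abcdef"

	// Create an IP List with the deprecated helper; CreateList is the
	// recommended replacement.
	list, err := api.CreateIPList(ctx, accountID, "temp_blocklist", "temporary block list", cloudflare.IPListTypeIP)
	if err != nil {
		log.Fatal(err)
	}

	// ListIPListItems follows the result_info cursor internally and returns
	// the full set of items.
	items, err := api.ListIPListItems(ctx, accountID, list.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("list %s has %d items\n", list.Name, len(items))
}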
+func (api *API) CreateIPListItem(ctx context.Context, accountID, ID, ip, comment string) ([]IPListItem, error) { + result, err := api.CreateIPListItemAsync(ctx, accountID, ID, ip, comment) + + if err != nil { + return []IPListItem{}, err + } + + err = api.pollIPListBulkOperation(ctx, accountID, result.Result.OperationID) + if err != nil { + return []IPListItem{}, err + } + + return api.ListIPListItems(ctx, accountID, ID) +} + +// CreateIPListItemsAsync bulk creates many IP List Items asynchronously. Users +// have to poll the operation status by using the operation_id returned by this +// function. +// +// API reference: https://api.cloudflare.com/#rules-lists-create-list-items +// +// Deprecated: Use `CreateListItemsAsync` instead. +func (api *API) CreateIPListItemsAsync(ctx context.Context, accountID, ID string, items []IPListItemCreateRequest) ( + IPListItemCreateResponse, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, items) + if err != nil { + return IPListItemCreateResponse{}, err + } + + result := IPListItemCreateResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListItemCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// CreateIPListItems bulk creates many IP List Items synchronously and returns +// the current set of IP List Items. +// +// Deprecated: Use `CreateListItems` instead. +func (api *API) CreateIPListItems(ctx context.Context, accountID, ID string, items []IPListItemCreateRequest) ( + []IPListItem, error) { + result, err := api.CreateIPListItemsAsync(ctx, accountID, ID, items) + if err != nil { + return []IPListItem{}, err + } + + err = api.pollIPListBulkOperation(ctx, accountID, result.Result.OperationID) + if err != nil { + return []IPListItem{}, err + } + + return api.ListIPListItems(ctx, accountID, ID) +} + +// ReplaceIPListItemsAsync replaces all IP List Items asynchronously. Users have +// to poll the operation status by using the operation_id returned by this +// function. +// +// API reference: https://api.cloudflare.com/#rules-lists-replace-list-items +// +// Deprecated: Use `ReplaceListItemsAsync` instead. +func (api *API) ReplaceIPListItemsAsync(ctx context.Context, accountID, ID string, items []IPListItemCreateRequest) ( + IPListItemCreateResponse, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, items) + if err != nil { + return IPListItemCreateResponse{}, err + } + + result := IPListItemCreateResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListItemCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// ReplaceIPListItems replaces all IP List Items synchronously and returns the +// current set of IP List Items. +// +// Deprecated: Use `ReplaceListItems` instead. +func (api *API) ReplaceIPListItems(ctx context.Context, accountID, ID string, items []IPListItemCreateRequest) ( + []IPListItem, error) { + result, err := api.ReplaceIPListItemsAsync(ctx, accountID, ID, items) + if err != nil { + return []IPListItem{}, err + } + + err = api.pollIPListBulkOperation(ctx, accountID, result.Result.OperationID) + if err != nil { + return []IPListItem{}, err + } + + return api.ListIPListItems(ctx, accountID, ID) +} + +// DeleteIPListItemsAsync removes specific Items of an IP List by their ID +// asynchronously. 
Users have to poll the operation status by using the +// operation_id returned by this function. +// +// API reference: https://api.cloudflare.com/#rules-lists-delete-list-items +// +// Deprecated: Use `DeleteListItemsAsync` instead. +func (api *API) DeleteIPListItemsAsync(ctx context.Context, accountID, ID string, items IPListItemDeleteRequest) ( + IPListItemDeleteResponse, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, items) + if err != nil { + return IPListItemDeleteResponse{}, err + } + + result := IPListItemDeleteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListItemDeleteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// DeleteIPListItems removes specific Items of an IP List by their ID +// synchronously and returns the current set of IP List Items. +// +// Deprecated: Use `DeleteListItems` instead. +func (api *API) DeleteIPListItems(ctx context.Context, accountID, ID string, items IPListItemDeleteRequest) ( + []IPListItem, error) { + result, err := api.DeleteIPListItemsAsync(ctx, accountID, ID, items) + if err != nil { + return []IPListItem{}, err + } + + err = api.pollIPListBulkOperation(ctx, accountID, result.Result.OperationID) + if err != nil { + return []IPListItem{}, err + } + + return api.ListIPListItems(ctx, accountID, ID) +} + +// GetIPListItem returns a single IP List Item. +// +// API reference: https://api.cloudflare.com/#rules-lists-get-list-item +// +// Deprecated: Use `GetListItem` instead. +func (api *API) GetIPListItem(ctx context.Context, accountID, listID, id string) (IPListItem, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items/%s", accountID, listID, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return IPListItem{}, err + } + + result := IPListItemsGetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListItem{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetIPListBulkOperation returns the status of a bulk operation. +// +// API reference: https://api.cloudflare.com/#rules-lists-get-bulk-operation +// +// Deprecated: Use `GetListBulkOperation` instead. +func (api *API) GetIPListBulkOperation(ctx context.Context, accountID, ID string) (IPListBulkOperation, error) { + uri := fmt.Sprintf("/accounts/%s/rules/lists/bulk_operations/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return IPListBulkOperation{}, err + } + + result := IPListBulkOperationResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return IPListBulkOperation{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// pollIPListBulkOperation implements synchronous behaviour for some +// asynchronous endpoints. bulk-operation status can be either pending, running, +// failed or completed. 
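The asynchronous variants return only an operation_id; callers poll GetIPListBulkOperation until the operation finishes, which is what the synchronous wrappers do internally (with exponential backoff). A sketch assuming placeholder token, account and list IDs:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken("example-api-token") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	accountID, listID := "0123456789abcdef0123456789abcdef", "2c0fc9fa937b11eaa1b71c4d701ab86e" // placeholders

	// Queue a bulk create and receive an operation_id instead of waiting.
	resp, err := api.CreateIPListItemsAsync(ctx, accountID, listID, []cloudflare.IPListItemCreateRequest{
		{IP: "192.0.2.1", Comment: "bad actor"},
		{IP: "198.51.100.0/24", Comment: "scanner range"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll the bulk operation ourselves; the synchronous helpers
	// (CreateIPListItems and friends) do the equivalent internally.
	for {
		op, err := api.GetIPListBulkOperation(ctx, accountID, resp.Result.OperationID)
		if err != nil {
			log.Fatal(err)
		}
		if op.Status == "completed" {
			break
		}
		if op.Status == "failed" {
			log.Fatalf("bulk operation failed: %s", op.Error)
		}
		time.Sleep(time.Second)
	}
	fmt.Println("items created")
}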
+func (api *API) pollIPListBulkOperation(ctx context.Context, accountID, ID string) error { + for i := uint8(0); i < 16; i++ { + sleepDuration := 1 << (i / 2) * time.Second + select { + case <-time.After(sleepDuration): + case <-ctx.Done(): + return fmt.Errorf("operation aborted during backoff: %w", ctx.Err()) + } + + bulkResult, err := api.GetIPListBulkOperation(ctx, accountID, ID) + if err != nil { + return err + } + + switch bulkResult.Status { + case "failed": + return errors.New(bulkResult.Error) + case "pending", "running": + continue + case "completed": + return nil + default: + return fmt.Errorf("%s: %s", errOperationUnexpectedStatus, bulkResult.Status) + } + } + + return errors.New(errOperationStillRunning) +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/ips.go b/vendor/github.com/cloudflare/cloudflare-go/ips.go new file mode 100644 index 0000000..3f181a7 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/ips.go @@ -0,0 +1,72 @@ +package cloudflare + +import ( + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/goccy/go-json" +) + +// IPRangesResponse contains the structure for the API response, not modified. +type IPRangesResponse struct { + IPv4CIDRs []string `json:"ipv4_cidrs"` + IPv6CIDRs []string `json:"ipv6_cidrs"` + ChinaColos []string `json:"china_colos"` +} + +// IPRanges contains lists of IPv4 and IPv6 CIDRs. +type IPRanges struct { + IPv4CIDRs []string `json:"ipv4_cidrs"` + IPv6CIDRs []string `json:"ipv6_cidrs"` + ChinaIPv4CIDRs []string `json:"china_ipv4_cidrs"` + ChinaIPv6CIDRs []string `json:"china_ipv6_cidrs"` +} + +// IPsResponse is the API response containing a list of IPs. +type IPsResponse struct { + Response + Result IPRangesResponse `json:"result"` +} + +// IPs gets a list of Cloudflare's IP ranges. +// +// This does not require logging in to the API. +// +// API reference: https://api.cloudflare.com/#cloudflare-ips +func IPs() (IPRanges, error) { + uri := fmt.Sprintf("%s/ips?china_colo=1", apiURL) + resp, err := http.Get(uri) //nolint:gosec + if err != nil { + return IPRanges{}, fmt.Errorf("HTTP request failed: %w", err) + } + if resp.StatusCode != http.StatusOK { + return IPRanges{}, errors.New("HTTP request failed: status is not HTTP 200") + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return IPRanges{}, fmt.Errorf("Response body could not be read: %w", err) + } + var r IPsResponse + err = json.Unmarshal(body, &r) + if err != nil { + return IPRanges{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + var ips IPRanges + ips.IPv4CIDRs = r.Result.IPv4CIDRs + ips.IPv6CIDRs = r.Result.IPv6CIDRs + + for _, ip := range r.Result.ChinaColos { + if strings.Contains(ip, ":") { + ips.ChinaIPv6CIDRs = append(ips.ChinaIPv6CIDRs, ip) + } else { + ips.ChinaIPv4CIDRs = append(ips.ChinaIPv4CIDRs, ip) + } + } + + return ips, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/keyless.go b/vendor/github.com/cloudflare/cloudflare-go/keyless.go new file mode 100644 index 0000000..39896af --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/keyless.go @@ -0,0 +1,146 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// KeylessSSL represents Keyless SSL configuration. 
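IPs is a package-level helper and needs no API credentials; a short sketch of fetching Cloudflare's published ranges:

package main

import (
	"fmt"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	// No client or token required for this endpoint.
	ranges, err := cloudflare.IPs()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("IPv4 ranges:", len(ranges.IPv4CIDRs))
	fmt.Println("IPv6 ranges:", len(ranges.IPv6CIDRs))
	fmt.Println("China IPv4 ranges:", len(ranges.ChinaIPv4CIDRs))
}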
+type KeylessSSL struct { + ID string `json:"id"` + Name string `json:"name"` + Host string `json:"host"` + Port int `json:"port"` + Status string `json:"status"` + Enabled bool `json:"enabled"` + Permissions []string `json:"permissions"` + CreatedOn time.Time `json:"created_on"` + ModifiedOn time.Time `json:"modified_on"` +} + +// KeylessSSLCreateRequest represents the request format made for creating KeylessSSL. +type KeylessSSLCreateRequest struct { + Host string `json:"host"` + Port int `json:"port"` + Certificate string `json:"certificate"` + Name string `json:"name,omitempty"` + BundleMethod string `json:"bundle_method,omitempty"` +} + +// KeylessSSLDetailResponse is the API response, containing a single Keyless SSL. +type KeylessSSLDetailResponse struct { + Response + Result KeylessSSL `json:"result"` +} + +// KeylessSSLListResponse represents the response from the Keyless SSL list endpoint. +type KeylessSSLListResponse struct { + Response + Result []KeylessSSL `json:"result"` +} + +// KeylessSSLUpdateRequest represents the request for updating KeylessSSL. +type KeylessSSLUpdateRequest struct { + Host string `json:"host,omitempty"` + Name string `json:"name,omitempty"` + Port int `json:"port,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +// CreateKeylessSSL creates a new Keyless SSL configuration for the zone. +// +// API reference: https://api.cloudflare.com/#keyless-ssl-for-a-zone-create-keyless-ssl-configuration +func (api *API) CreateKeylessSSL(ctx context.Context, zoneID string, keylessSSL KeylessSSLCreateRequest) (KeylessSSL, error) { + uri := fmt.Sprintf("/zones/%s/keyless_certificates", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, keylessSSL) + if err != nil { + return KeylessSSL{}, err + } + + var keylessSSLDetailResponse KeylessSSLDetailResponse + err = json.Unmarshal(res, &keylessSSLDetailResponse) + if err != nil { + return KeylessSSL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return keylessSSLDetailResponse.Result, nil +} + +// ListKeylessSSL lists Keyless SSL configurations for a zone. +// +// API reference: https://api.cloudflare.com/#keyless-ssl-for-a-zone-list-keyless-ssl-configurations +func (api *API) ListKeylessSSL(ctx context.Context, zoneID string) ([]KeylessSSL, error) { + uri := fmt.Sprintf("/zones/%s/keyless_certificates", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var keylessSSLListResponse KeylessSSLListResponse + err = json.Unmarshal(res, &keylessSSLListResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return keylessSSLListResponse.Result, nil +} + +// KeylessSSL provides the configuration for a given Keyless SSL identifier. +// +// API reference: https://api.cloudflare.com/#keyless-ssl-for-a-zone-keyless-ssl-details +func (api *API) KeylessSSL(ctx context.Context, zoneID, keylessSSLID string) (KeylessSSL, error) { + uri := fmt.Sprintf("/zones/%s/keyless_certificates/%s", zoneID, keylessSSLID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return KeylessSSL{}, err + } + + var keylessResponse KeylessSSLDetailResponse + err = json.Unmarshal(res, &keylessResponse) + if err != nil { + return KeylessSSL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return keylessResponse.Result, nil +} + +// UpdateKeylessSSL updates an existing Keyless SSL configuration. 
+// +// API reference: https://api.cloudflare.com/#keyless-ssl-for-a-zone-edit-keyless-ssl-configuration +func (api *API) UpdateKeylessSSL(ctx context.Context, zoneID, kelessSSLID string, keylessSSL KeylessSSLUpdateRequest) (KeylessSSL, error) { + uri := fmt.Sprintf("/zones/%s/keyless_certificates/%s", zoneID, kelessSSLID) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, keylessSSL) + if err != nil { + return KeylessSSL{}, err + } + + var keylessSSLDetailResponse KeylessSSLDetailResponse + err = json.Unmarshal(res, &keylessSSLDetailResponse) + if err != nil { + return KeylessSSL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return keylessSSLDetailResponse.Result, nil +} + +// DeleteKeylessSSL deletes an existing Keyless SSL configuration. +// +// API reference: https://api.cloudflare.com/#keyless-ssl-for-a-zone-delete-keyless-ssl-configuration +func (api *API) DeleteKeylessSSL(ctx context.Context, zoneID, keylessSSLID string) error { + uri := fmt.Sprintf("/zones/%s/keyless_certificates/%s", zoneID, keylessSSLID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/list.go b/vendor/github.com/cloudflare/cloudflare-go/list.go new file mode 100644 index 0000000..f7cef4a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/list.go @@ -0,0 +1,629 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +const ( + // ListTypeIP specifies a list containing IP addresses. + ListTypeIP = "ip" + // ListTypeRedirect specifies a list containing redirects. + ListTypeRedirect = "redirect" + // ListTypeHostname specifies a list containing hostnames. + ListTypeHostname = "hostname" + // ListTypeHostname specifies a list containing autonomous system numbers (ASNs). + ListTypeASN = "asn" +) + +// ListBulkOperation contains information about a Bulk Operation. +type ListBulkOperation struct { + ID string `json:"id"` + Status string `json:"status"` + Error string `json:"error"` + Completed *time.Time `json:"completed"` +} + +// List contains information about a List. +type List struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Kind string `json:"kind"` + NumItems int `json:"num_items"` + NumReferencingFilters int `json:"num_referencing_filters"` + CreatedOn *time.Time `json:"created_on"` + ModifiedOn *time.Time `json:"modified_on"` +} + +// Redirect represents a redirect item in a List. +type Redirect struct { + SourceUrl string `json:"source_url"` + IncludeSubdomains *bool `json:"include_subdomains,omitempty"` + TargetUrl string `json:"target_url"` + StatusCode *int `json:"status_code,omitempty"` + PreserveQueryString *bool `json:"preserve_query_string,omitempty"` + SubpathMatching *bool `json:"subpath_matching,omitempty"` + PreservePathSuffix *bool `json:"preserve_path_suffix,omitempty"` +} + +type Hostname struct { + UrlHostname string `json:"url_hostname"` +} + +// ListItem contains information about a single List Item. +type ListItem struct { + ID string `json:"id"` + IP *string `json:"ip,omitempty"` + Redirect *Redirect `json:"redirect,omitempty"` + Hostname *Hostname `json:"hostname,omitempty"` + ASN *uint32 `json:"asn,omitempty"` + Comment string `json:"comment"` + CreatedOn *time.Time `json:"created_on"` + ModifiedOn *time.Time `json:"modified_on"` +} + +// ListCreateRequest contains data for a new List. 
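A sketch of the Keyless SSL helpers above (CreateKeylessSSL, UpdateKeylessSSL, DeleteKeylessSSL), assuming a placeholder token, zone ID, key-server hostname and certificate body:

package main

import (
	"context"
	"fmt"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken("example-api-token") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	zoneID := "0123456789abcdef0123456789abcdef" // placeholder

	// Register a key server for the zone. Host, port and certificate are
	// illustrative values only.
	ks, err := api.CreateKeylessSSL(ctx, zoneID, cloudflare.KeylessSSLCreateRequest{
		Host:        "keyserver.example.com",
		Port:        24008,
		Certificate: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
		Name:        "example keyless config",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created keyless SSL config", ks.ID, "status:", ks.Status)

	// Disable the configuration via PATCH, then remove it entirely.
	enabled := false
	if _, err := api.UpdateKeylessSSL(ctx, zoneID, ks.ID, cloudflare.KeylessSSLUpdateRequest{Enabled: &enabled}); err != nil {
		log.Fatal(err)
	}
	if err := api.DeleteKeylessSSL(ctx, zoneID, ks.ID); err != nil {
		log.Fatal(err)
	}
}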
+type ListCreateRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Kind string `json:"kind"` +} + +// ListItemCreateRequest contains data for a new List Item. +type ListItemCreateRequest struct { + IP *string `json:"ip,omitempty"` + Redirect *Redirect `json:"redirect,omitempty"` + Hostname *Hostname `json:"hostname,omitempty"` + ASN *uint32 `json:"asn,omitempty"` + Comment string `json:"comment"` +} + +// ListItemDeleteRequest wraps List Items that shall be deleted. +type ListItemDeleteRequest struct { + Items []ListItemDeleteItemRequest `json:"items"` +} + +// ListItemDeleteItemRequest contains single List Items that shall be deleted. +type ListItemDeleteItemRequest struct { + ID string `json:"id"` +} + +// ListUpdateRequest contains data for a List update. +type ListUpdateRequest struct { + Description string `json:"description"` +} + +// ListResponse contains a single List. +type ListResponse struct { + Response + Result List `json:"result"` +} + +// ListItemCreateResponse contains information about the creation of a List Item. +type ListItemCreateResponse struct { + Response + Result struct { + OperationID string `json:"operation_id"` + } `json:"result"` +} + +// ListListResponse contains a slice of Lists. +type ListListResponse struct { + Response + Result []List `json:"result"` +} + +// ListBulkOperationResponse contains information about a Bulk Operation. +type ListBulkOperationResponse struct { + Response + Result ListBulkOperation `json:"result"` +} + +// ListDeleteResponse contains information about the deletion of a List. +type ListDeleteResponse struct { + Response + Result struct { + ID string `json:"id"` + } `json:"result"` +} + +// ListItemsListResponse contains information about List Items. +type ListItemsListResponse struct { + Response + ResultInfo `json:"result_info"` + Result []ListItem `json:"result"` +} + +// ListItemDeleteResponse contains information about the deletion of a List Item. +type ListItemDeleteResponse struct { + Response + Result struct { + OperationID string `json:"operation_id"` + } `json:"result"` +} + +// ListItemsGetResponse contains information about a single List Item. +type ListItemsGetResponse struct { + Response + Result ListItem `json:"result"` +} + +type ListListsParams struct { +} + +type ListCreateParams struct { + Name string + Description string + Kind string +} + +type ListGetParams struct { + ID string +} + +type ListUpdateParams struct { + ID string + Description string +} + +type ListDeleteParams struct { + ID string +} + +type ListListItemsParams struct { + ID string `url:"-"` + Search string `url:"search,omitempty"` + PerPage int `url:"per_page,omitempty"` + Cursor string `url:"cursor,omitempty"` +} + +type ListCreateItemsParams struct { + ID string + Items []ListItemCreateRequest +} + +type ListCreateItemParams struct { + ID string + Item ListItemCreateRequest +} + +type ListReplaceItemsParams struct { + ID string + Items []ListItemCreateRequest +} + +type ListDeleteItemsParams struct { + ID string + Items ListItemDeleteRequest +} + +type ListGetItemParams struct { + ListID string + ID string +} + +type ListGetBulkOperationParams struct { + ID string +} + +// ListLists lists all Lists. 
+// +// API reference: https://api.cloudflare.com/#rules-lists-list-lists +func (api *API) ListLists(ctx context.Context, rc *ResourceContainer, params ListListsParams) ([]List, error) { + if rc.Identifier == "" { + return []List{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []List{}, err + } + + result := ListListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []List{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// CreateList creates a new List. +// +// API reference: https://api.cloudflare.com/#rules-lists-create-list +func (api *API) CreateList(ctx context.Context, rc *ResourceContainer, params ListCreateParams) (List, error) { + if rc.Identifier == "" { + return List{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, ListCreateRequest{Name: params.Name, Description: params.Description, Kind: params.Kind}) + if err != nil { + return List{}, err + } + + result := ListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return List{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetList returns a single List. +// +// API reference: https://api.cloudflare.com/#rules-lists-get-list +func (api *API) GetList(ctx context.Context, rc *ResourceContainer, listID string) (List, error) { + if rc.Identifier == "" { + return List{}, ErrMissingAccountID + } + + if listID == "" { + return List{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s", rc.Identifier, listID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return List{}, err + } + + result := ListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return List{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdateList updates the description of an existing List. +// +// API reference: https://api.cloudflare.com/#rules-lists-update-list +func (api *API) UpdateList(ctx context.Context, rc *ResourceContainer, params ListUpdateParams) (List, error) { + if rc.Identifier == "" { + return List{}, ErrMissingAccountID + } + + if params.ID == "" { + return List{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, ListUpdateRequest{Description: params.Description}) + if err != nil { + return List{}, err + } + + result := ListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return List{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteList deletes a List. 
+// +// API reference: https://api.cloudflare.com/#rules-lists-delete-list +func (api *API) DeleteList(ctx context.Context, rc *ResourceContainer, listID string) (ListDeleteResponse, error) { + if rc.Identifier == "" { + return ListDeleteResponse{}, ErrMissingAccountID + } + + if listID == "" { + return ListDeleteResponse{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s", rc.Identifier, listID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return ListDeleteResponse{}, err + } + + result := ListDeleteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListDeleteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// ListListItems returns a list with all items in a List. +// +// API reference: https://api.cloudflare.com/#rules-lists-list-list-items +func (api *API) ListListItems(ctx context.Context, rc *ResourceContainer, params ListListItemsParams) ([]ListItem, error) { + var list []ListItem + + for { + uri := buildURI(fmt.Sprintf("/accounts/%s/rules/lists/%s/items", rc.Identifier, params.ID), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []ListItem{}, err + } + + result := ListItemsListResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []ListItem{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + list = append(list, result.Result...) + if cursor := result.ResultInfo.Cursors.After; cursor == "" { + break + } else { + params.Cursor = cursor + } + } + + return list, nil +} + +// CreateListItemAsync creates a new List Item asynchronously. Users have to poll the operation status by +// using the operation_id returned by this function. +// +// API reference: https://api.cloudflare.com/#rules-lists-create-list-items +func (api *API) CreateListItemAsync(ctx context.Context, rc *ResourceContainer, params ListCreateItemParams) (ListItemCreateResponse, error) { + if rc.Identifier == "" { + return ListItemCreateResponse{}, ErrMissingAccountID + } + + if params.ID == "" { + return ListItemCreateResponse{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, []ListItemCreateRequest{params.Item}) + if err != nil { + return ListItemCreateResponse{}, err + } + + result := ListItemCreateResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListItemCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// CreateListItem creates a new List Item synchronously and returns the current set of List Items. +func (api *API) CreateListItem(ctx context.Context, rc *ResourceContainer, params ListCreateItemParams) ([]ListItem, error) { + result, err := api.CreateListItemAsync(ctx, rc, params) + + if err != nil { + return []ListItem{}, err + } + + err = api.pollListBulkOperation(ctx, rc, result.Result.OperationID) + if err != nil { + return []ListItem{}, err + } + + return api.ListListItems(ctx, rc, ListListItemsParams{ID: params.ID}) +} + +// CreateListItemsAsync bulk creates multiple List Items asynchronously. Users +// have to poll the operation status by using the operation_id returned by this +// function. 
+// +// API reference: https://api.cloudflare.com/#rules-lists-create-list-items +func (api *API) CreateListItemsAsync(ctx context.Context, rc *ResourceContainer, params ListCreateItemsParams) (ListItemCreateResponse, error) { + if rc.Identifier == "" { + return ListItemCreateResponse{}, ErrMissingAccountID + } + + if params.ID == "" { + return ListItemCreateResponse{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Items) + if err != nil { + return ListItemCreateResponse{}, err + } + + result := ListItemCreateResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListItemCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// CreateListItems bulk creates multiple List Items synchronously and returns +// the current set of List Items. +func (api *API) CreateListItems(ctx context.Context, rc *ResourceContainer, params ListCreateItemsParams) ([]ListItem, error) { + result, err := api.CreateListItemsAsync(ctx, rc, params) + if err != nil { + return []ListItem{}, err + } + + err = api.pollListBulkOperation(ctx, rc, result.Result.OperationID) + if err != nil { + return []ListItem{}, err + } + + return api.ListListItems(ctx, rc, ListListItemsParams{ID: params.ID}) +} + +// ReplaceListItemsAsync replaces all List Items asynchronously. Users have to +// poll the operation status by using the operation_id returned by this +// function. +// +// API reference: https://api.cloudflare.com/#rules-lists-replace-list-items +func (api *API) ReplaceListItemsAsync(ctx context.Context, rc *ResourceContainer, params ListReplaceItemsParams) (ListItemCreateResponse, error) { + if rc.Identifier == "" { + return ListItemCreateResponse{}, ErrMissingAccountID + } + + if params.ID == "" { + return ListItemCreateResponse{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Items) + if err != nil { + return ListItemCreateResponse{}, err + } + + result := ListItemCreateResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListItemCreateResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// ReplaceListItems replaces all List Items synchronously and returns the +// current set of List Items. +func (api *API) ReplaceListItems(ctx context.Context, rc *ResourceContainer, params ListReplaceItemsParams) ( + []ListItem, error) { + result, err := api.ReplaceListItemsAsync(ctx, rc, params) + if err != nil { + return []ListItem{}, err + } + + err = api.pollListBulkOperation(ctx, rc, result.Result.OperationID) + if err != nil { + return []ListItem{}, err + } + + return api.ListListItems(ctx, rc, ListListItemsParams{ID: params.ID}) +} + +// DeleteListItemsAsync removes specific Items of a List by their ID +// asynchronously. Users have to poll the operation status by using the +// operation_id returned by this function. 
+// +// API reference: https://api.cloudflare.com/#rules-lists-delete-list-items +func (api *API) DeleteListItemsAsync(ctx context.Context, rc *ResourceContainer, params ListDeleteItemsParams) (ListItemDeleteResponse, error) { + if rc.Identifier == "" { + return ListItemDeleteResponse{}, ErrMissingAccountID + } + + if params.ID == "" { + return ListItemDeleteResponse{}, ErrMissingListID + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, params.Items) + if err != nil { + return ListItemDeleteResponse{}, err + } + + result := ListItemDeleteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListItemDeleteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, nil +} + +// DeleteListItems removes specific Items of a List by their ID synchronously +// and returns the current set of List Items. +func (api *API) DeleteListItems(ctx context.Context, rc *ResourceContainer, params ListDeleteItemsParams) ([]ListItem, error) { + result, err := api.DeleteListItemsAsync(ctx, rc, params) + if err != nil { + return []ListItem{}, err + } + + err = api.pollListBulkOperation(ctx, rc, result.Result.OperationID) + if err != nil { + return []ListItem{}, err + } + + return api.ListListItems(ctx, AccountIdentifier(rc.Identifier), ListListItemsParams{ID: params.ID}) +} + +// GetListItem returns a single List Item. +// +// API reference: https://api.cloudflare.com/#rules-lists-get-list-item +func (api *API) GetListItem(ctx context.Context, rc *ResourceContainer, listID, itemID string) (ListItem, error) { + if rc.Identifier == "" { + return ListItem{}, ErrMissingAccountID + } + + if listID == "" { + return ListItem{}, ErrMissingListID + } + + if itemID == "" { + return ListItem{}, ErrMissingResourceIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/%s/items/%s", rc.Identifier, listID, itemID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ListItem{}, err + } + + result := ListItemsGetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListItem{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetListBulkOperation returns the status of a bulk operation. +// +// API reference: https://api.cloudflare.com/#rules-lists-get-bulk-operation +func (api *API) GetListBulkOperation(ctx context.Context, rc *ResourceContainer, ID string) (ListBulkOperation, error) { + if rc.Identifier == "" { + return ListBulkOperation{}, ErrMissingAccountID + } + + if ID == "" { + return ListBulkOperation{}, ErrMissingResourceIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/rules/lists/bulk_operations/%s", rc.Identifier, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ListBulkOperation{}, err + } + + result := ListBulkOperationResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ListBulkOperation{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// pollListBulkOperation implements synchronous behaviour for some asynchronous +// endpoints. bulk-operation status can be either pending, running, failed or +// completed. 
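A sketch of the account-scoped List workflow defined in this file: create a list, then add items with the synchronous wrapper, which polls the bulk operation before returning the resulting item set. The token, account ID and item values are placeholders; redirect, hostname and ASN lists work the same way with the corresponding ListItemCreateRequest fields.

package main

import (
	"context"
	"fmt"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken("example-api-token") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	account := cloudflare.AccountIdentifier("0123456789abcdef0123456789abcdef") // placeholder

	// Create an IP list. Kind can also be ListTypeRedirect, ListTypeHostname
	// or ListTypeASN, in which case items set Redirect, Hostname or ASN
	// instead of IP.
	list, err := api.CreateList(ctx, account, cloudflare.ListCreateParams{
		Name:        "blocked_ips",
		Description: "addresses blocked at the edge",
		Kind:        cloudflare.ListTypeIP,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Add items synchronously; CreateListItems waits for the bulk operation
	// to complete and then returns the current items.
	ip1, ip2 := "192.0.2.1", "198.51.100.0/24"
	items, err := api.CreateListItems(ctx, account, cloudflare.ListCreateItemsParams{
		ID: list.ID,
		Items: []cloudflare.ListItemCreateRequest{
			{IP: &ip1, Comment: "bad actor"},
			{IP: &ip2, Comment: "scanner range"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("list %s now has %d items\n", list.Name, len(items))
}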
+func (api *API) pollListBulkOperation(ctx context.Context, rc *ResourceContainer, ID string) error { + for i := uint8(0); i < 16; i++ { + sleepDuration := 1 << (i / 2) * time.Second + select { + case <-time.After(sleepDuration): + case <-ctx.Done(): + return fmt.Errorf("operation aborted during backoff: %w", ctx.Err()) + } + + bulkResult, err := api.GetListBulkOperation(ctx, rc, ID) + if err != nil { + return err + } + + switch bulkResult.Status { + case "failed": + return errors.New(bulkResult.Error) + case "pending", "running": + continue + case "completed": + return nil + default: + return fmt.Errorf("%s: %s", errOperationUnexpectedStatus, bulkResult.Status) + } + } + + return errors.New(errOperationStillRunning) +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/load_balancing.go b/vendor/github.com/cloudflare/cloudflare-go/load_balancing.go new file mode 100644 index 0000000..44d7fd9 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/load_balancing.go @@ -0,0 +1,821 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// LoadBalancerPool represents a load balancer pool's properties. +type LoadBalancerPool struct { + ID string `json:"id,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Description string `json:"description"` + Name string `json:"name"` + Enabled bool `json:"enabled"` + MinimumOrigins *int `json:"minimum_origins,omitempty"` + Monitor string `json:"monitor,omitempty"` + Origins []LoadBalancerOrigin `json:"origins"` + NotificationEmail string `json:"notification_email,omitempty"` + Latitude *float32 `json:"latitude,omitempty"` + Longitude *float32 `json:"longitude,omitempty"` + LoadShedding *LoadBalancerLoadShedding `json:"load_shedding,omitempty"` + OriginSteering *LoadBalancerOriginSteering `json:"origin_steering,omitempty"` + Healthy *bool `json:"healthy,omitempty"` + + // CheckRegions defines the geographic region(s) from where to run health-checks from - e.g. "WNAM", "WEU", "SAF", "SAM". + // Providing a null/empty value means "all regions", which may not be available to all plan types. + CheckRegions []string `json:"check_regions"` +} + +// LoadBalancerOrigin represents a Load Balancer origin's properties. +type LoadBalancerOrigin struct { + Name string `json:"name"` + Address string `json:"address"` + Enabled bool `json:"enabled"` + // Weight of this origin relative to other origins in the pool. + // Based on the configured weight the total traffic is distributed + // among origins within the pool. + // + // When LoadBalancerOriginSteering.Policy="least_outstanding_requests", this + // weight is used to scale the origin's outstanding requests. + // When LoadBalancerOriginSteering.Policy="least_connections", this + // weight is used to scale the origin's open connections. + Weight float64 `json:"weight"` + Header map[string][]string `json:"header"` + // The virtual network subnet ID the origin belongs in. + // Virtual network must also belong to the account. + VirtualNetworkID string `json:"virtual_network_id,omitempty"` +} + +// LoadBalancerOriginSteering controls origin selection for new sessions and traffic without session affinity. +type LoadBalancerOriginSteering struct { + // Policy determines the type of origin steering policy to use. + // It defaults to "random" (weighted) when empty or unspecified. + // + // "random": Select an origin randomly. 
+ // + // "hash": Select an origin by computing a hash over the CF-Connecting-IP address. + // + // "least_outstanding_requests": Select an origin by taking into consideration origin weights, + // as well as each origin's number of outstanding requests. Origins with more pending requests + // are weighted proportionately less relative to others. + // + // "least_connections": Select an origin by taking into consideration origin weights, + // as well as each origin's number of open connections. Origins with more open connections + // are weighted proportionately less relative to others. Supported for HTTP/1 and HTTP/2 connections. + Policy string `json:"policy,omitempty"` +} + +// LoadBalancerMonitor represents a load balancer monitor's properties. +type LoadBalancerMonitor struct { + ID string `json:"id,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Type string `json:"type"` + Description string `json:"description"` + Method string `json:"method"` + Path string `json:"path"` + Header map[string][]string `json:"header"` + Timeout int `json:"timeout"` + Retries int `json:"retries"` + Interval int `json:"interval"` + ConsecutiveUp int `json:"consecutive_up"` + ConsecutiveDown int `json:"consecutive_down"` + Port uint16 `json:"port,omitempty"` + ExpectedBody string `json:"expected_body"` + ExpectedCodes string `json:"expected_codes"` + FollowRedirects bool `json:"follow_redirects"` + AllowInsecure bool `json:"allow_insecure"` + ProbeZone string `json:"probe_zone"` +} + +// LoadBalancer represents a load balancer's properties. +type LoadBalancer struct { + ID string `json:"id,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Description string `json:"description"` + Name string `json:"name"` + TTL int `json:"ttl,omitempty"` + FallbackPool string `json:"fallback_pool"` + DefaultPools []string `json:"default_pools"` + RegionPools map[string][]string `json:"region_pools"` + PopPools map[string][]string `json:"pop_pools"` + CountryPools map[string][]string `json:"country_pools"` + Proxied bool `json:"proxied"` + Enabled *bool `json:"enabled,omitempty"` + Persistence string `json:"session_affinity,omitempty"` + PersistenceTTL int `json:"session_affinity_ttl,omitempty"` + SessionAffinityAttributes *SessionAffinityAttributes `json:"session_affinity_attributes,omitempty"` + Rules []*LoadBalancerRule `json:"rules,omitempty"` + RandomSteering *RandomSteering `json:"random_steering,omitempty"` + AdaptiveRouting *AdaptiveRouting `json:"adaptive_routing,omitempty"` + LocationStrategy *LocationStrategy `json:"location_strategy,omitempty"` + + // SteeringPolicy controls pool selection logic. + // + // "off": Select pools in DefaultPools order. + // + // "geo": Select pools based on RegionPools/PopPools/CountryPools. + // For non-proxied requests, the country for CountryPools is determined by LocationStrategy. + // + // "dynamic_latency": Select pools based on RTT (requires health checks). + // + // "random": Selects pools in a random order. + // + // "proximity": Use the pools' latitude and longitude to select the closest pool using + // the Cloudflare PoP location for proxied requests or the location determined by + // LocationStrategy for non-proxied requests. + // + // "least_outstanding_requests": Select a pool by taking into consideration + // RandomSteering weights, as well as each pool's number of outstanding requests. 
+ // Pools with more pending requests are weighted proportionately less relative to others. + // + // "least_connections": Select a pool by taking into consideration + // RandomSteering weights, as well as each pool's number of open connections. + // Pools with more open connections are weighted proportionately less relative to others. + // Supported for HTTP/1 and HTTP/2 connections. + // + // "": Maps to "geo" if RegionPools or PopPools or CountryPools have entries otherwise "off". + SteeringPolicy string `json:"steering_policy,omitempty"` +} + +// LoadBalancerLoadShedding contains the settings for controlling load shedding. +type LoadBalancerLoadShedding struct { + DefaultPercent float32 `json:"default_percent,omitempty"` + DefaultPolicy string `json:"default_policy,omitempty"` + SessionPercent float32 `json:"session_percent,omitempty"` + SessionPolicy string `json:"session_policy,omitempty"` +} + +// LoadBalancerRule represents a single rule entry for a Load Balancer. Each rules +// is run one after the other in priority order. Disabled rules are skipped. +type LoadBalancerRule struct { + Overrides LoadBalancerRuleOverrides `json:"overrides"` + + // Name is required but is only used for human readability + Name string `json:"name"` + + Condition string `json:"condition"` + + // Priority controls the order of rule execution the lowest value will be invoked first + Priority int `json:"priority"` + + // FixedResponse if set and the condition is true we will not run + // routing logic but rather directly respond with the provided fields. + // FixedResponse implies terminates. + FixedResponse *LoadBalancerFixedResponseData `json:"fixed_response,omitempty"` + + Disabled bool `json:"disabled"` + + // Terminates flag this rule as 'terminating'. No further rules will + // be executed after this one. + Terminates bool `json:"terminates,omitempty"` +} + +// LoadBalancerFixedResponseData contains all the data needed to generate +// a fixed response from a Load Balancer. This behavior can be enabled via Rules. +type LoadBalancerFixedResponseData struct { + // MessageBody data to write into the http body + MessageBody string `json:"message_body,omitempty"` + // StatusCode the http status code to response with + StatusCode int `json:"status_code,omitempty"` + // ContentType value of the http 'content-type' header + ContentType string `json:"content_type,omitempty"` + // Location value of the http 'location' header + Location string `json:"location,omitempty"` +} + +// LoadBalancerRuleOverrides are the set of field overridable by the rules system. +type LoadBalancerRuleOverrides struct { + // session affinity + Persistence string `json:"session_affinity,omitempty"` + PersistenceTTL *uint `json:"session_affinity_ttl,omitempty"` + + SessionAffinityAttrs *LoadBalancerRuleOverridesSessionAffinityAttrs `json:"session_affinity_attributes,omitempty"` + + TTL uint `json:"ttl,omitempty"` + + SteeringPolicy string `json:"steering_policy,omitempty"` + FallbackPool string `json:"fallback_pool,omitempty"` + + DefaultPools []string `json:"default_pools,omitempty"` + PoPPools map[string][]string `json:"pop_pools,omitempty"` + RegionPools map[string][]string `json:"region_pools,omitempty"` + CountryPools map[string][]string `json:"country_pools,omitempty"` + + RandomSteering *RandomSteering `json:"random_steering,omitempty"` + AdaptiveRouting *AdaptiveRouting `json:"adaptive_routing,omitempty"` + LocationStrategy *LocationStrategy `json:"location_strategy,omitempty"` +} + +// RandomSteering configures pool weights. 
+// +// SteeringPolicy="random": A random pool is selected with probability +// proportional to pool weights. +// +// SteeringPolicy="least_outstanding_requests": Use pool weights to +// scale each pool's outstanding requests. +// +// SteeringPolicy="least_connections": Use pool weights to +// scale each pool's open connections. +type RandomSteering struct { + DefaultWeight float64 `json:"default_weight,omitempty"` + PoolWeights map[string]float64 `json:"pool_weights,omitempty"` +} + +// AdaptiveRouting controls features that modify the routing of requests +// to pools and origins in response to dynamic conditions, such as during +// the interval between active health monitoring requests. +// For example, zero-downtime failover occurs immediately when an origin +// becomes unavailable due to HTTP 521, 522, or 523 response codes. +// If there is another healthy origin in the same pool, the request is +// retried once against this alternate origin. +type AdaptiveRouting struct { + // FailoverAcrossPools extends zero-downtime failover of requests to healthy origins + // from alternate pools, when no healthy alternate exists in the same pool, according + // to the failover order defined by traffic and origin steering. + // When set false (the default) zero-downtime failover will only occur between origins + // within the same pool. See SessionAffinityAttributes for control over when sessions + // are broken or reassigned. + FailoverAcrossPools *bool `json:"failover_across_pools,omitempty"` +} + +// LocationStrategy controls location-based steering for non-proxied requests. +// See SteeringPolicy to learn how steering is affected. +type LocationStrategy struct { + // PreferECS determines whether the EDNS Client Subnet (ECS) GeoIP should + // be preferred as the authoritative location. + // + // "always": Always prefer ECS. + // + // "never": Never prefer ECS. + // + // "proximity": (default) Prefer ECS only when SteeringPolicy="proximity". + // + // "geo": Prefer ECS only when SteeringPolicy="geo". + PreferECS string `json:"prefer_ecs,omitempty"` + // Mode determines the authoritative location when ECS is not preferred, + // does not exist in the request, or its GeoIP lookup is unsuccessful. + // + // "pop": (default) Use the Cloudflare PoP location. + // + // "resolver_ip": Use the DNS resolver GeoIP location. + // If the GeoIP lookup is unsuccessful, use the Cloudflare PoP location. + Mode string `json:"mode,omitempty"` +} + +// LoadBalancerRuleOverridesSessionAffinityAttrs mimics SessionAffinityAttributes without the +// DrainDuration field as that field can not be overwritten via rules. +type LoadBalancerRuleOverridesSessionAffinityAttrs struct { + SameSite string `json:"samesite,omitempty"` + Secure string `json:"secure,omitempty"` + ZeroDowntimeFailover string `json:"zero_downtime_failover,omitempty"` + Headers []string `json:"headers,omitempty"` + RequireAllHeaders *bool `json:"require_all_headers,omitempty"` +} + +// SessionAffinityAttributes represents additional configuration options for session affinity. +type SessionAffinityAttributes struct { + SameSite string `json:"samesite,omitempty"` + Secure string `json:"secure,omitempty"` + DrainDuration int `json:"drain_duration,omitempty"` + ZeroDowntimeFailover string `json:"zero_downtime_failover,omitempty"` + Headers []string `json:"headers,omitempty"` + RequireAllHeaders bool `json:"require_all_headers,omitempty"` +} + +// LoadBalancerOriginHealth represents the health of the origin. 
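A sketch showing how the pool, origin and steering types above fit together, passed to the CreateLoadBalancerPool helper defined further down in this file. The token, account ID, origin names and addresses are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken("example-api-token") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	account := cloudflare.AccountIdentifier("0123456789abcdef0123456789abcdef") // placeholder

	// Two weighted origins with least-outstanding-requests origin steering.
	pool := cloudflare.LoadBalancerPool{
		Name:        "us_east_web",
		Description: "primary web origins",
		Enabled:     true,
		Origins: []cloudflare.LoadBalancerOrigin{
			{Name: "web-1", Address: "203.0.113.10", Enabled: true, Weight: 1.0},
			{Name: "web-2", Address: "203.0.113.11", Enabled: true, Weight: 0.5},
		},
		OriginSteering: &cloudflare.LoadBalancerOriginSteering{
			Policy: "least_outstanding_requests",
		},
		// Restrict health checks to specific regions; empty means all regions.
		CheckRegions: []string{"WNAM", "WEU"},
	}

	created, err := api.CreateLoadBalancerPool(ctx, account, cloudflare.CreateLoadBalancerPoolParams{
		LoadBalancerPool: pool,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created pool", created.ID)
}

Pools and monitors are account- or user-level resources, so the call above rejects a zone-level ResourceContainer.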
+type LoadBalancerOriginHealth struct { + Healthy bool `json:"healthy,omitempty"` + RTT Duration `json:"rtt,omitempty"` + FailureReason string `json:"failure_reason,omitempty"` + ResponseCode int `json:"response_code,omitempty"` +} + +// LoadBalancerPoolPopHealth represents the health of the pool for given PoP. +type LoadBalancerPoolPopHealth struct { + Healthy bool `json:"healthy,omitempty"` + Origins []map[string]LoadBalancerOriginHealth `json:"origins,omitempty"` +} + +// LoadBalancerPoolHealth represents the healthchecks from different PoPs for a pool. +type LoadBalancerPoolHealth struct { + ID string `json:"pool_id,omitempty"` + PopHealth map[string]LoadBalancerPoolPopHealth `json:"pop_health,omitempty"` +} + +// loadBalancerPoolResponse represents the response from the load balancer pool endpoints. +type loadBalancerPoolResponse struct { + Response + Result LoadBalancerPool `json:"result"` +} + +// loadBalancerPoolListResponse represents the response from the List Pools endpoint. +type loadBalancerPoolListResponse struct { + Response + Result []LoadBalancerPool `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// loadBalancerMonitorResponse represents the response from the load balancer monitor endpoints. +type loadBalancerMonitorResponse struct { + Response + Result LoadBalancerMonitor `json:"result"` +} + +// loadBalancerMonitorListResponse represents the response from the List Monitors endpoint. +type loadBalancerMonitorListResponse struct { + Response + Result []LoadBalancerMonitor `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// loadBalancerResponse represents the response from the load balancer endpoints. +type loadBalancerResponse struct { + Response + Result LoadBalancer `json:"result"` +} + +// loadBalancerListResponse represents the response from the List Load Balancers endpoint. +type loadBalancerListResponse struct { + Response + Result []LoadBalancer `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// loadBalancerPoolHealthResponse represents the response from the Pool Health Details endpoint. +type loadBalancerPoolHealthResponse struct { + Response + Result LoadBalancerPoolHealth `json:"result"` +} + +type CreateLoadBalancerPoolParams struct { + LoadBalancerPool LoadBalancerPool +} + +type ListLoadBalancerPoolParams struct { + PaginationOptions +} + +type UpdateLoadBalancerPoolParams struct { + LoadBalancer LoadBalancerPool +} + +type CreateLoadBalancerMonitorParams struct { + LoadBalancerMonitor LoadBalancerMonitor +} + +type ListLoadBalancerMonitorParams struct { + PaginationOptions +} + +type UpdateLoadBalancerMonitorParams struct { + LoadBalancerMonitor LoadBalancerMonitor +} + +type CreateLoadBalancerParams struct { + LoadBalancer LoadBalancer +} + +type ListLoadBalancerParams struct { + PaginationOptions +} + +type UpdateLoadBalancerParams struct { + LoadBalancer LoadBalancer +} + +var ( + ErrMissingPoolID = errors.New("missing required pool ID") + ErrMissingMonitorID = errors.New("missing required monitor ID") + ErrMissingLoadBalancerID = errors.New("missing required load balancer ID") +) + +// CreateLoadBalancerPool creates a new load balancer pool. 
+// +// API reference: https://api.cloudflare.com/#load-balancer-pools-create-pool +func (api *API) CreateLoadBalancerPool(ctx context.Context, rc *ResourceContainer, params CreateLoadBalancerPoolParams) (LoadBalancerPool, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerPool{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + var uri string + if rc.Level == UserRouteLevel { + uri = "/user/load_balancers/pools" + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/pools", rc.Identifier) + } + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.LoadBalancerPool) + if err != nil { + return LoadBalancerPool{}, err + } + var r loadBalancerPoolResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerPool{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListLoadBalancerPools lists load balancer pools connected to an account. +// +// API reference: https://api.cloudflare.com/#load-balancer-pools-list-pools +func (api *API) ListLoadBalancerPools(ctx context.Context, rc *ResourceContainer, params ListLoadBalancerPoolParams) ([]LoadBalancerPool, error) { + if rc.Level == ZoneRouteLevel { + return []LoadBalancerPool{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + var uri string + if rc.Level == UserRouteLevel { + uri = "/user/load_balancers/pools" + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/pools", rc.Identifier) + } + + uri = buildURI(uri, params.PaginationOptions) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r loadBalancerPoolListResponse + if err := json.Unmarshal(res, &r); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetLoadBalancerPool returns the details for a load balancer pool. +// +// API reference: https://api.cloudflare.com/#load-balancer-pools-pool-details +func (api *API) GetLoadBalancerPool(ctx context.Context, rc *ResourceContainer, poolID string) (LoadBalancerPool, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerPool{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if poolID == "" { + return LoadBalancerPool{}, ErrMissingPoolID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/pools/%s", poolID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/pools/%s", rc.Identifier, poolID) + } + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return LoadBalancerPool{}, err + } + var r loadBalancerPoolResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerPool{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteLoadBalancerPool disables and deletes a load balancer pool. 
+// +// API reference: https://api.cloudflare.com/#load-balancer-pools-delete-pool +func (api *API) DeleteLoadBalancerPool(ctx context.Context, rc *ResourceContainer, poolID string) error { + if rc.Level == ZoneRouteLevel { + return fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if poolID == "" { + return ErrMissingPoolID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/pools/%s", poolID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/pools/%s", rc.Identifier, poolID) + } + + if _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil); err != nil { + return err + } + + return nil +} + +// UpdateLoadBalancerPool modifies a configured load balancer pool. +// +// API reference: https://api.cloudflare.com/#load-balancer-pools-update-pool +func (api *API) UpdateLoadBalancerPool(ctx context.Context, rc *ResourceContainer, params UpdateLoadBalancerPoolParams) (LoadBalancerPool, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerPool{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if params.LoadBalancer.ID == "" { + return LoadBalancerPool{}, ErrMissingPoolID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/pools/%s", params.LoadBalancer.ID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/pools/%s", rc.Identifier, params.LoadBalancer.ID) + } + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.LoadBalancer) + if err != nil { + return LoadBalancerPool{}, err + } + var r loadBalancerPoolResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerPool{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateLoadBalancerMonitor creates a new load balancer monitor. +// +// API reference: https://api.cloudflare.com/#load-balancer-monitors-create-monitor +func (api *API) CreateLoadBalancerMonitor(ctx context.Context, rc *ResourceContainer, params CreateLoadBalancerMonitorParams) (LoadBalancerMonitor, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerMonitor{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + var uri string + if rc.Level == UserRouteLevel { + uri = "/user/load_balancers/monitors" + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/monitors", rc.Identifier) + } + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.LoadBalancerMonitor) + if err != nil { + return LoadBalancerMonitor{}, err + } + var r loadBalancerMonitorResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerMonitor{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListLoadBalancerMonitors lists load balancer monitors connected to an account. 
+// +// API reference: https://api.cloudflare.com/#load-balancer-monitors-list-monitors +func (api *API) ListLoadBalancerMonitors(ctx context.Context, rc *ResourceContainer, params ListLoadBalancerMonitorParams) ([]LoadBalancerMonitor, error) { + if rc.Level == ZoneRouteLevel { + return []LoadBalancerMonitor{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + var uri string + if rc.Level == UserRouteLevel { + uri = "/user/load_balancers/monitors" + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/monitors", rc.Identifier) + } + + uri = buildURI(uri, params.PaginationOptions) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r loadBalancerMonitorListResponse + if err := json.Unmarshal(res, &r); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetLoadBalancerMonitor returns the details for a load balancer monitor. +// +// API reference: https://api.cloudflare.com/#load-balancer-monitors-monitor-details +func (api *API) GetLoadBalancerMonitor(ctx context.Context, rc *ResourceContainer, monitorID string) (LoadBalancerMonitor, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerMonitor{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if monitorID == "" { + return LoadBalancerMonitor{}, ErrMissingMonitorID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/monitors/%s", monitorID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/monitors/%s", rc.Identifier, monitorID) + } + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return LoadBalancerMonitor{}, err + } + var r loadBalancerMonitorResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerMonitor{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteLoadBalancerMonitor disables and deletes a load balancer monitor. +// +// API reference: https://api.cloudflare.com/#load-balancer-monitors-delete-monitor +func (api *API) DeleteLoadBalancerMonitor(ctx context.Context, rc *ResourceContainer, monitorID string) error { + if rc.Level == ZoneRouteLevel { + return fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if monitorID == "" { + return ErrMissingMonitorID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/monitors/%s", monitorID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/monitors/%s", rc.Identifier, monitorID) + } + + if _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil); err != nil { + return err + } + return nil +} + +// UpdateLoadBalancerMonitor modifies a configured load balancer monitor. 
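+//
+// Example (illustrative sketch only; it assumes `monitor` is an existing
+// LoadBalancerMonitor whose ID field is already populated, and the account ID
+// is a placeholder):
+//
+//	updated, err := api.UpdateLoadBalancerMonitor(context.Background(), AccountIdentifier("<account-id>"), UpdateLoadBalancerMonitorParams{
+//		LoadBalancerMonitor: monitor,
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = updated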
+// +// API reference: https://api.cloudflare.com/#load-balancer-monitors-update-monitor +func (api *API) UpdateLoadBalancerMonitor(ctx context.Context, rc *ResourceContainer, params UpdateLoadBalancerMonitorParams) (LoadBalancerMonitor, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerMonitor{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if params.LoadBalancerMonitor.ID == "" { + return LoadBalancerMonitor{}, ErrMissingMonitorID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/monitors/%s", params.LoadBalancerMonitor.ID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/monitors/%s", rc.Identifier, params.LoadBalancerMonitor.ID) + } + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.LoadBalancerMonitor) + if err != nil { + return LoadBalancerMonitor{}, err + } + var r loadBalancerMonitorResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerMonitor{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateLoadBalancer creates a new load balancer. +// +// API reference: https://api.cloudflare.com/#load-balancers-create-load-balancer +func (api *API) CreateLoadBalancer(ctx context.Context, rc *ResourceContainer, params CreateLoadBalancerParams) (LoadBalancer, error) { + if rc.Level != ZoneRouteLevel { + return LoadBalancer{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/zones/%s/load_balancers", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.LoadBalancer) + if err != nil { + return LoadBalancer{}, err + } + var r loadBalancerResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancer{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListLoadBalancers lists load balancers configured on a zone. +// +// API reference: https://api.cloudflare.com/#load-balancers-list-load-balancers +func (api *API) ListLoadBalancers(ctx context.Context, rc *ResourceContainer, params ListLoadBalancerParams) ([]LoadBalancer, error) { + if rc.Level != ZoneRouteLevel { + return []LoadBalancer{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := buildURI(fmt.Sprintf("/zones/%s/load_balancers", rc.Identifier), params.PaginationOptions) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r loadBalancerListResponse + if err := json.Unmarshal(res, &r); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetLoadBalancer returns the details for a load balancer. 
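+//
+// Example (illustrative sketch only; load balancers are zone-scoped, so the
+// zone and load balancer IDs below are placeholders and ZoneIdentifier is
+// assumed to be the package's zone-level ResourceContainer helper):
+//
+//	lb, err := api.GetLoadBalancer(context.Background(), ZoneIdentifier("<zone-id>"), "<load-balancer-id>")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(lb.ID)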
+//
+// API reference: https://api.cloudflare.com/#load-balancers-load-balancer-details
+func (api *API) GetLoadBalancer(ctx context.Context, rc *ResourceContainer, loadbalancerID string) (LoadBalancer, error) {
+	if rc.Level != ZoneRouteLevel {
+		return LoadBalancer{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if loadbalancerID == "" {
+		return LoadBalancer{}, ErrMissingLoadBalancerID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/load_balancers/%s", rc.Identifier, loadbalancerID)
+
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return LoadBalancer{}, err
+	}
+	var r loadBalancerResponse
+	if err := json.Unmarshal(res, &r); err != nil {
+		return LoadBalancer{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// DeleteLoadBalancer disables and deletes a load balancer.
+//
+// API reference: https://api.cloudflare.com/#load-balancers-delete-load-balancer
+func (api *API) DeleteLoadBalancer(ctx context.Context, rc *ResourceContainer, loadbalancerID string) error {
+	if rc.Level != ZoneRouteLevel {
+		return fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if loadbalancerID == "" {
+		return ErrMissingLoadBalancerID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/load_balancers/%s", rc.Identifier, loadbalancerID)
+
+	if _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// UpdateLoadBalancer modifies a configured load balancer.
+//
+// API reference: https://api.cloudflare.com/#load-balancers-update-load-balancer
+func (api *API) UpdateLoadBalancer(ctx context.Context, rc *ResourceContainer, params UpdateLoadBalancerParams) (LoadBalancer, error) {
+	if rc.Level != ZoneRouteLevel {
+		return LoadBalancer{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if params.LoadBalancer.ID == "" {
+		return LoadBalancer{}, ErrMissingLoadBalancerID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/load_balancers/%s", rc.Identifier, params.LoadBalancer.ID)
+
+	res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.LoadBalancer)
+	if err != nil {
+		return LoadBalancer{}, err
+	}
+	var r loadBalancerResponse
+	if err := json.Unmarshal(res, &r); err != nil {
+		return LoadBalancer{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// GetLoadBalancerPoolHealth fetches the latest healthcheck details for a single
+// +// API reference: https://api.cloudflare.com/#load-balancer-pools-pool-health-details +func (api *API) GetLoadBalancerPoolHealth(ctx context.Context, rc *ResourceContainer, poolID string) (LoadBalancerPoolHealth, error) { + if rc.Level == ZoneRouteLevel { + return LoadBalancerPoolHealth{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if poolID == "" { + return LoadBalancerPoolHealth{}, ErrMissingPoolID + } + + var uri string + if rc.Level == UserRouteLevel { + uri = fmt.Sprintf("/user/load_balancers/pools/%s/health", poolID) + } else { + uri = fmt.Sprintf("/accounts/%s/load_balancers/pools/%s/health", rc.Identifier, poolID) + } + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return LoadBalancerPoolHealth{}, err + } + var r loadBalancerPoolHealthResponse + if err := json.Unmarshal(res, &r); err != nil { + return LoadBalancerPoolHealth{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/lockdown.go b/vendor/github.com/cloudflare/cloudflare-go/lockdown.go new file mode 100644 index 0000000..b7c7632 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/lockdown.go @@ -0,0 +1,192 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// ZoneLockdown represents a Zone Lockdown rule. A rule only permits access to +// the provided URL pattern(s) from the given IP address(es) or subnet(s). +type ZoneLockdown struct { + ID string `json:"id"` + Description string `json:"description"` + URLs []string `json:"urls"` + Configurations []ZoneLockdownConfig `json:"configurations"` + Paused bool `json:"paused"` + Priority int `json:"priority,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` +} + +// ZoneLockdownConfig represents a Zone Lockdown config, which comprises +// a Target ("ip" or "ip_range") and a Value (an IP address or IP+mask, +// respectively.) +type ZoneLockdownConfig struct { + Target string `json:"target"` + Value string `json:"value"` +} + +// ZoneLockdownResponse represents a response from the Zone Lockdown endpoint. +type ZoneLockdownResponse struct { + Result ZoneLockdown `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// ZoneLockdownListResponse represents a response from the List Zone Lockdown +// endpoint. +type ZoneLockdownListResponse struct { + Result []ZoneLockdown `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// ZoneLockdownCreateParams contains required and optional params +// for creating a zone lockdown. +type ZoneLockdownCreateParams struct { + Description string `json:"description"` + URLs []string `json:"urls"` + Configurations []ZoneLockdownConfig `json:"configurations"` + Paused bool `json:"paused"` + Priority int `json:"priority,omitempty"` +} + +// ZoneLockdownUpdateParams contains required and optional params +// for updating a zone lockdown. +type ZoneLockdownUpdateParams struct { + ID string `json:"id"` + Description string `json:"description"` + URLs []string `json:"urls"` + Configurations []ZoneLockdownConfig `json:"configurations"` + Paused bool `json:"paused"` + Priority int `json:"priority,omitempty"` +} + +type LockdownListParams struct { + ResultInfo +} + +// CreateZoneLockdown creates a Zone ZoneLockdown rule for the given zone ID. 
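+//
+// Example (illustrative sketch only; the zone ID and IP value are placeholders
+// and ZoneIdentifier is assumed to be the package's zone-level helper):
+//
+//	lockdown, err := api.CreateZoneLockdown(context.Background(), ZoneIdentifier("<zone-id>"), ZoneLockdownCreateParams{
+//		Description:    "Restrict the staging site to the office IP",
+//		URLs:           []string{"staging.example.com/*"},
+//		Configurations: []ZoneLockdownConfig{{Target: "ip", Value: "198.51.100.4"}},
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = lockdown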
+// +// API reference: https://api.cloudflare.com/#zone-ZoneLockdown-create-a-ZoneLockdown-rule +func (api *API) CreateZoneLockdown(ctx context.Context, rc *ResourceContainer, params ZoneLockdownCreateParams) (ZoneLockdown, error) { + uri := fmt.Sprintf("/zones/%s/firewall/lockdowns", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return ZoneLockdown{}, err + } + + response := &ZoneLockdownResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneLockdown{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// UpdateZoneLockdown updates a Zone ZoneLockdown rule (based on the ID) for the given zone ID. +// +// API reference: https://api.cloudflare.com/#zone-ZoneLockdown-update-ZoneLockdown-rule +func (api *API) UpdateZoneLockdown(ctx context.Context, rc *ResourceContainer, params ZoneLockdownUpdateParams) (ZoneLockdown, error) { + uri := fmt.Sprintf("/zones/%s/firewall/lockdowns/%s", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return ZoneLockdown{}, err + } + + response := &ZoneLockdownResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneLockdown{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// DeleteZoneLockdown deletes a Zone ZoneLockdown rule (based on the ID) for the given zone ID. +// +// API reference: https://api.cloudflare.com/#zone-ZoneLockdown-delete-ZoneLockdown-rule +func (api *API) DeleteZoneLockdown(ctx context.Context, rc *ResourceContainer, id string) (ZoneLockdown, error) { + uri := fmt.Sprintf("/zones/%s/firewall/lockdowns/%s", rc.Identifier, id) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return ZoneLockdown{}, err + } + + response := &ZoneLockdownResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneLockdown{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// ZoneLockdown retrieves a Zone ZoneLockdown rule (based on the ID) for the given zone ID. +// +// API reference: https://api.cloudflare.com/#zone-ZoneLockdown-ZoneLockdown-rule-details +func (api *API) ZoneLockdown(ctx context.Context, rc *ResourceContainer, id string) (ZoneLockdown, error) { + uri := fmt.Sprintf("/zones/%s/firewall/lockdowns/%s", rc.Identifier, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZoneLockdown{}, err + } + + response := &ZoneLockdownResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneLockdown{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// ListZoneLockdowns retrieves every Zone ZoneLockdown rules for a given zone ID. +// +// Automatically paginates all results unless `params.PerPage` and `params.Page` +// is set. 
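+//
+// Example (illustrative sketch only; the zone ID is a placeholder, and the
+// empty LockdownListParams lets the call paginate through every rule):
+//
+//	rules, _, err := api.ListZoneLockdowns(context.Background(), ZoneIdentifier("<zone-id>"), LockdownListParams{})
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(len(rules))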
+//
+// API reference: https://api.cloudflare.com/#zone-ZoneLockdown-list-ZoneLockdown-rules
+func (api *API) ListZoneLockdowns(ctx context.Context, rc *ResourceContainer, params LockdownListParams) ([]ZoneLockdown, *ResultInfo, error) {
+	autoPaginate := true
+	if params.PerPage >= 1 || params.Page >= 1 {
+		autoPaginate = false
+	}
+	if params.PerPage < 1 {
+		params.PerPage = 50
+	}
+	if params.Page < 1 {
+		params.Page = 1
+	}
+
+	var zoneLockdowns []ZoneLockdown
+	var zResponse ZoneLockdownListResponse
+	for {
+		zResponse = ZoneLockdownListResponse{}
+		uri := buildURI(fmt.Sprintf("/zones/%s/firewall/lockdowns", rc.Identifier), params)
+
+		res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+		if err != nil {
+			return []ZoneLockdown{}, &ResultInfo{}, err
+		}
+
+		err = json.Unmarshal(res, &zResponse)
+		if err != nil {
+			return []ZoneLockdown{}, &ResultInfo{}, fmt.Errorf("failed to unmarshal lockdowns JSON data: %w", err)
+		}
+
+		zoneLockdowns = append(zoneLockdowns, zResponse.Result...)
+		params.ResultInfo = zResponse.ResultInfo.Next()
+
+		if params.ResultInfo.Done() || !autoPaginate {
+			break
+		}
+	}
+
+	return zoneLockdowns, &zResponse.ResultInfo, nil
+}
diff --git a/vendor/github.com/cloudflare/cloudflare-go/logger.go b/vendor/github.com/cloudflare/cloudflare-go/logger.go
new file mode 100644
index 0000000..b1bd9b4
--- /dev/null
+++ b/vendor/github.com/cloudflare/cloudflare-go/logger.go
@@ -0,0 +1,130 @@
+package cloudflare
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+// silentRetryLogger is the logger provided to the retryable client to stop it
+// from displaying retry attempts.
+var silentRetryLogger = log.New(io.Discard, "", log.LstdFlags)
+
+const (
+	// LevelNull sets a logger to show no messages at all.
+	LevelNull Level = 0
+
+	// LevelError sets a logger to show error messages only.
+	LevelError Level = 1
+
+	// LevelWarn sets a logger to show warning messages or anything more
+	// severe.
+	LevelWarn Level = 2
+
+	// LevelInfo sets a logger to show informational messages or anything more
+	// severe.
+	LevelInfo Level = 3
+
+	// LevelDebug sets a logger to show debug messages or anything more
+	// severe.
+	LevelDebug Level = 4
+)
+
+// DefaultLeveledLogger is the default logger that the library will use to log
+// errors, warnings, and informational messages.
+var DefaultLeveledLogger LeveledLoggerInterface = &LeveledLogger{
+	Level: LevelError,
+}
+
+// SilentLeveledLogger is a logger for disregarding all logs written.
+var SilentLeveledLogger LeveledLoggerInterface = &LeveledLogger{
+	Level: LevelNull,
+}
+
+// Level represents a logging level.
+type Level uint32
+
+// LeveledLogger is a leveled logger implementation.
+//
+// It prints warnings and errors to `os.Stderr` and other messages to
+// `os.Stdout`.
+type LeveledLogger struct {
+	// Level is the minimum logging level that will be emitted by this logger.
+	//
+	// For example, a Level set to LevelWarn will emit warnings and errors, but
+	// not informational or debug messages.
+	//
+	// Always set this with a constant like LevelWarn because the individual
+	// values are not guaranteed to be stable.
+	Level Level
+
+	// Internal testing use only.
+	stderrOverride io.Writer
+	stdoutOverride io.Writer
+}
+
+// Debugf logs a debug message using Printf conventions.
+func (l *LeveledLogger) Debugf(format string, v ...interface{}) {
+	if l.Level >= LevelDebug {
+		fmt.Fprintf(l.stdout(), "[debug] "+format, v...)
+	}
+}
+
+// Errorf logs an error message using Printf conventions.
+func (l *LeveledLogger) Errorf(format string, v ...interface{}) {
+	if l.Level >= LevelError {
+		fmt.Fprintf(l.stderr(), "[error] "+format, v...)
+	}
+}
+
+// Infof logs an informational message using Printf conventions.
+func (l *LeveledLogger) Infof(format string, v ...interface{}) {
+	if l.Level >= LevelInfo {
+		fmt.Fprintf(l.stdout(), "[info] "+format, v...)
+	}
+}
+
+// Warnf logs a warning message using Printf conventions.
+func (l *LeveledLogger) Warnf(format string, v ...interface{}) {
+	if l.Level >= LevelWarn {
+		fmt.Fprintf(l.stderr(), "[warn] "+format, v...)
+	}
+}
+
+func (l *LeveledLogger) stderr() io.Writer {
+	if l.stderrOverride != nil {
+		return l.stderrOverride
+	}
+
+	return os.Stderr
+}
+
+func (l *LeveledLogger) stdout() io.Writer {
+	if l.stdoutOverride != nil {
+		return l.stdoutOverride
+	}
+
+	return os.Stdout
+}
+
+// LeveledLoggerInterface provides a basic leveled logging interface for
+// printing debug, informational, warning, and error messages.
+//
+// It's implemented by LeveledLogger and also provides out-of-the-box
+// compatibility with a Logrus Logger, but may require a thin shim for use with
+// other logging libraries that use less standard conventions, like Zap.
+type LeveledLoggerInterface interface {
+	// Debugf logs a debug message using Printf conventions.
+	Debugf(format string, v ...interface{})
+
+	// Errorf logs an error message using Printf conventions.
+	Errorf(format string, v ...interface{})
+
+	// Infof logs an informational message using Printf conventions.
+	Infof(format string, v ...interface{})
+
+	// Warnf logs a warning message using Printf conventions.
+	Warnf(format string, v ...interface{})
+}
diff --git a/vendor/github.com/cloudflare/cloudflare-go/logpull.go b/vendor/github.com/cloudflare/cloudflare-go/logpull.go
new file mode 100644
index 0000000..d369115
--- /dev/null
+++ b/vendor/github.com/cloudflare/cloudflare-go/logpull.go
@@ -0,0 +1,58 @@
+package cloudflare
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/goccy/go-json"
+)
+
+// LogpullRetentionConfiguration describes the structure of a Logpull Retention
+// payload.
+type LogpullRetentionConfiguration struct {
+	Flag bool `json:"flag"`
+}
+
+// LogpullRetentionConfigurationResponse is the API response, containing the
+// Logpull retention result.
+type LogpullRetentionConfigurationResponse struct {
+	Response
+	Result LogpullRetentionConfiguration `json:"result"`
+}
+
+// GetLogpullRetentionFlag gets the current Logpull retention flag for a zone.
+//
+// API reference: https://developers.cloudflare.com/logs/logpull-api/enabling-log-retention/
+func (api *API) GetLogpullRetentionFlag(ctx context.Context, zoneID string) (*LogpullRetentionConfiguration, error) {
+	uri := fmt.Sprintf("/zones/%s/logs/control/retention/flag", zoneID)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return &LogpullRetentionConfiguration{}, err
+	}
+	var r LogpullRetentionConfigurationResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return &r.Result, nil
+}
+
+// SetLogpullRetentionFlag updates the retention flag to the defined boolean.
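+//
+// Example (illustrative sketch only; the zone ID is a placeholder):
+//
+//	cfg, err := api.SetLogpullRetentionFlag(context.Background(), "<zone-id>", true)
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(cfg.Flag)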
+// +// API reference: https://developers.cloudflare.com/logs/logpull-api/enabling-log-retention/ +func (api *API) SetLogpullRetentionFlag(ctx context.Context, zoneID string, enabled bool) (*LogpullRetentionConfiguration, error) { + uri := fmt.Sprintf("/zones/%s/logs/control/retention/flag", zoneID) + flagPayload := LogpullRetentionConfiguration{Flag: enabled} + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, flagPayload) + if err != nil { + return &LogpullRetentionConfiguration{}, err + } + var r LogpullRetentionConfigurationResponse + err = json.Unmarshal(res, &r) + if err != nil { + return &LogpullRetentionConfiguration{}, err + } + return &r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/logpush.go b/vendor/github.com/cloudflare/cloudflare-go/logpush.go new file mode 100644 index 0000000..34e32e9 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/logpush.go @@ -0,0 +1,572 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// LogpushJob describes a Logpush job. +type LogpushJob struct { + ID int `json:"id,omitempty"` + Dataset string `json:"dataset"` + Enabled bool `json:"enabled"` + Kind string `json:"kind,omitempty"` + Name string `json:"name"` + LogpullOptions string `json:"logpull_options,omitempty"` + OutputOptions *LogpushOutputOptions `json:"output_options,omitempty"` + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge,omitempty"` + LastComplete *time.Time `json:"last_complete,omitempty"` + LastError *time.Time `json:"last_error,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Frequency string `json:"frequency,omitempty"` + Filter *LogpushJobFilters `json:"filter,omitempty"` + MaxUploadBytes int `json:"max_upload_bytes,omitempty"` + MaxUploadRecords int `json:"max_upload_records,omitempty"` + MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` +} + +type LogpushJobFilters struct { + Where LogpushJobFilter `json:"where"` +} + +type Operator string + +const ( + Equal Operator = "eq" + NotEqual Operator = "!eq" + LessThan Operator = "lt" + LessThanOrEqual Operator = "leq" + GreaterThan Operator = "gt" + GreaterThanOrEqual Operator = "geq" + StartsWith Operator = "startsWith" + EndsWith Operator = "endsWith" + NotStartsWith Operator = "!startsWith" + NotEndsWith Operator = "!endsWith" + Contains Operator = "contains" + NotContains Operator = "!contains" + ValueIsIn Operator = "in" + ValueIsNotIn Operator = "!in" +) + +type LogpushJobFilter struct { + // either this + And []LogpushJobFilter `json:"and,omitempty"` + Or []LogpushJobFilter `json:"or,omitempty"` + // or this + Key string `json:"key,omitempty"` + Operator Operator `json:"operator,omitempty"` + Value interface{} `json:"value,omitempty"` +} + +type LogpushOutputOptions struct { + FieldNames []string `json:"field_names"` + OutputType string `json:"output_type,omitempty"` + BatchPrefix string `json:"batch_prefix,omitempty"` + BatchSuffix string `json:"batch_suffix,omitempty"` + RecordPrefix string `json:"record_prefix,omitempty"` + RecordSuffix string `json:"record_suffix,omitempty"` + RecordTemplate string `json:"record_template,omitempty"` + RecordDelimiter string `json:"record_delimiter,omitempty"` + FieldDelimiter string `json:"field_delimiter,omitempty"` + TimestampFormat string `json:"timestamp_format,omitempty"` + SampleRate float64 `json:"sample_rate,omitempty"` + CVE202144228 *bool 
`json:"CVE-2021-44228,omitempty"` +} + +// LogpushJobsResponse is the API response, containing an array of Logpush Jobs. +type LogpushJobsResponse struct { + Response + Result []LogpushJob `json:"result"` +} + +// LogpushJobDetailsResponse is the API response, containing a single Logpush Job. +type LogpushJobDetailsResponse struct { + Response + Result LogpushJob `json:"result"` +} + +// LogpushFieldsResponse is the API response for a datasets fields. +type LogpushFieldsResponse struct { + Response + Result LogpushFields `json:"result"` +} + +// LogpushFields is a map of available Logpush field names & descriptions. +type LogpushFields map[string]string + +// LogpushGetOwnershipChallenge describes a ownership validation. +type LogpushGetOwnershipChallenge struct { + Filename string `json:"filename"` + Valid bool `json:"valid"` + Message string `json:"message"` +} + +// LogpushGetOwnershipChallengeResponse is the API response, containing a ownership challenge. +type LogpushGetOwnershipChallengeResponse struct { + Response + Result LogpushGetOwnershipChallenge `json:"result"` +} + +// LogpushGetOwnershipChallengeRequest is the API request for get ownership challenge. +type LogpushGetOwnershipChallengeRequest struct { + DestinationConf string `json:"destination_conf"` +} + +// LogpushOwnershipChallengeValidationResponse is the API response, +// containing a ownership challenge validation result. +type LogpushOwnershipChallengeValidationResponse struct { + Response + Result struct { + Valid bool `json:"valid"` + } +} + +// LogpushValidateOwnershipChallengeRequest is the API request for validate ownership challenge. +type LogpushValidateOwnershipChallengeRequest struct { + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge"` +} + +// LogpushDestinationExistsResponse is the API response, +// containing a destination exists check result. +type LogpushDestinationExistsResponse struct { + Response + Result struct { + Exists bool `json:"exists"` + } +} + +// LogpushDestinationExistsRequest is the API request for check destination exists. +type LogpushDestinationExistsRequest struct { + DestinationConf string `json:"destination_conf"` +} + +// Custom Marshaller for LogpushJob filter key. +func (f LogpushJob) MarshalJSON() ([]byte, error) { + type Alias LogpushJob + + var filter string + + if f.Filter != nil { + b, err := json.Marshal(f.Filter) + + if err != nil { + return nil, err + } + + filter = string(b) + } + + return json.Marshal(&struct { + Filter string `json:"filter,omitempty"` + Alias + }{ + Filter: filter, + Alias: (Alias)(f), + }) +} + +// Custom Unmarshaller for LogpushJob filter key. 
+func (f *LogpushJob) UnmarshalJSON(data []byte) error { + type Alias LogpushJob + aux := &struct { + Filter string `json:"filter,omitempty"` + *Alias + }{ + Alias: (*Alias)(f), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if aux != nil && aux.Filter != "" { + var filter LogpushJobFilters + if err := json.Unmarshal([]byte(aux.Filter), &filter); err != nil { + return err + } + if err := filter.Where.Validate(); err != nil { + return err + } + f.Filter = &filter + } + return nil +} + +func (f CreateLogpushJobParams) MarshalJSON() ([]byte, error) { + type Alias CreateLogpushJobParams + + var filter string + + if f.Filter != nil { + b, err := json.Marshal(f.Filter) + + if err != nil { + return nil, err + } + + filter = string(b) + } + + return json.Marshal(&struct { + Filter string `json:"filter,omitempty"` + Alias + }{ + Filter: filter, + Alias: (Alias)(f), + }) +} + +// Custom Unmarshaller for CreateLogpushJobParams filter key. +func (f *CreateLogpushJobParams) UnmarshalJSON(data []byte) error { + type Alias CreateLogpushJobParams + aux := &struct { + Filter string `json:"filter,omitempty"` + *Alias + }{ + Alias: (*Alias)(f), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if aux != nil && aux.Filter != "" { + var filter LogpushJobFilters + if err := json.Unmarshal([]byte(aux.Filter), &filter); err != nil { + return err + } + if err := filter.Where.Validate(); err != nil { + return err + } + f.Filter = &filter + } + return nil +} + +func (f UpdateLogpushJobParams) MarshalJSON() ([]byte, error) { + type Alias UpdateLogpushJobParams + + var filter string + + if f.Filter != nil { + b, err := json.Marshal(f.Filter) + + if err != nil { + return nil, err + } + + filter = string(b) + } + + return json.Marshal(&struct { + Filter string `json:"filter,omitempty"` + Alias + }{ + Filter: filter, + Alias: (Alias)(f), + }) +} + +// Custom Unmarshaller for UpdateLogpushJobParams filter key. 
+func (f *UpdateLogpushJobParams) UnmarshalJSON(data []byte) error { + type Alias UpdateLogpushJobParams + aux := &struct { + Filter string `json:"filter,omitempty"` + *Alias + }{ + Alias: (*Alias)(f), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if aux != nil && aux.Filter != "" { + var filter LogpushJobFilters + if err := json.Unmarshal([]byte(aux.Filter), &filter); err != nil { + return err + } + if err := filter.Where.Validate(); err != nil { + return err + } + f.Filter = &filter + } + return nil +} + +func (filter *LogpushJobFilter) Validate() error { + if filter.And != nil { + if filter.Or != nil || filter.Key != "" || filter.Operator != "" || filter.Value != nil { + return errors.New("And can't be set with Or, Key, Operator or Value") + } + for i, element := range filter.And { + err := element.Validate() + if err != nil { + return fmt.Errorf("element %v in And is invalid: %w", i, err) + } + } + return nil + } + if filter.Or != nil { + if filter.And != nil || filter.Key != "" || filter.Operator != "" || filter.Value != nil { + return errors.New("Or can't be set with And, Key, Operator or Value") + } + for i, element := range filter.Or { + err := element.Validate() + if err != nil { + return fmt.Errorf("element %v in Or is invalid: %w", i, err) + } + } + return nil + } + if filter.Key == "" { + return errors.New("Key is missing") + } + + if filter.Operator == "" { + return errors.New("Operator is missing") + } + + if filter.Value == nil { + return errors.New("Value is missing") + } + + return nil +} + +type CreateLogpushJobParams struct { + Dataset string `json:"dataset"` + Enabled bool `json:"enabled"` + Kind string `json:"kind,omitempty"` + Name string `json:"name"` + LogpullOptions string `json:"logpull_options,omitempty"` + OutputOptions *LogpushOutputOptions `json:"output_options,omitempty"` + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Frequency string `json:"frequency,omitempty"` + Filter *LogpushJobFilters `json:"filter,omitempty"` + MaxUploadBytes int `json:"max_upload_bytes,omitempty"` + MaxUploadRecords int `json:"max_upload_records,omitempty"` + MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` +} + +type ListLogpushJobsParams struct{} + +type ListLogpushJobsForDatasetParams struct { + Dataset string `json:"-"` +} + +type GetLogpushFieldsParams struct { + Dataset string `json:"-"` +} + +type UpdateLogpushJobParams struct { + ID int `json:"-"` + Dataset string `json:"dataset"` + Enabled bool `json:"enabled"` + Kind string `json:"kind,omitempty"` + Name string `json:"name"` + LogpullOptions string `json:"logpull_options,omitempty"` + OutputOptions *LogpushOutputOptions `json:"output_options,omitempty"` + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge,omitempty"` + LastComplete *time.Time `json:"last_complete,omitempty"` + LastError *time.Time `json:"last_error,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Frequency string `json:"frequency,omitempty"` + Filter *LogpushJobFilters `json:"filter,omitempty"` + MaxUploadBytes int `json:"max_upload_bytes,omitempty"` + MaxUploadRecords int `json:"max_upload_records,omitempty"` + MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` +} + +type ValidateLogpushOwnershipChallengeParams struct { + DestinationConf string `json:"destination_conf"` + 
	OwnershipChallenge string `json:"ownership_challenge"`
+}
+
+type GetLogpushOwnershipChallengeParams struct {
+	DestinationConf string `json:"destination_conf"`
+}
+
+// CreateLogpushJob creates a new zone-level Logpush Job.
+//
+// API reference: https://api.cloudflare.com/#logpush-jobs-create-logpush-job
+func (api *API) CreateLogpushJob(ctx context.Context, rc *ResourceContainer, params CreateLogpushJobParams) (*LogpushJob, error) {
+	uri := fmt.Sprintf("/%s/%s/logpush/jobs", rc.Level, rc.Identifier)
+	res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params)
+	if err != nil {
+		return nil, err
+	}
+	var r LogpushJobDetailsResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return &r.Result, nil
+}
+
+// ListLogpushJobs returns all Logpush Jobs for all datasets.
+//
+// API reference: https://api.cloudflare.com/#logpush-jobs-list-logpush-jobs
+func (api *API) ListLogpushJobs(ctx context.Context, rc *ResourceContainer, params ListLogpushJobsParams) ([]LogpushJob, error) {
+	uri := fmt.Sprintf("/%s/%s/logpush/jobs", rc.Level, rc.Identifier)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return []LogpushJob{}, err
+	}
+	var r LogpushJobsResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return []LogpushJob{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// ListLogpushJobsForDataset returns all Logpush Jobs for a dataset.
+//
+// API reference: https://api.cloudflare.com/#logpush-jobs-list-logpush-jobs-for-a-dataset
+func (api *API) ListLogpushJobsForDataset(ctx context.Context, rc *ResourceContainer, params ListLogpushJobsForDatasetParams) ([]LogpushJob, error) {
+	uri := fmt.Sprintf("/%s/%s/logpush/datasets/%s/jobs", rc.Level, rc.Identifier, params.Dataset)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return []LogpushJob{}, err
+	}
+	var r LogpushJobsResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return []LogpushJob{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// GetLogpushFields returns fields for a given dataset.
+//
+// API reference: https://api.cloudflare.com/#logpush-jobs-list-logpush-jobs
+func (api *API) GetLogpushFields(ctx context.Context, rc *ResourceContainer, params GetLogpushFieldsParams) (LogpushFields, error) {
+	uri := fmt.Sprintf("/%s/%s/logpush/datasets/%s/fields", rc.Level, rc.Identifier, params.Dataset)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return LogpushFields{}, err
+	}
+	var r LogpushFieldsResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return LogpushFields{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// GetLogpushJob fetches detail about one Logpush Job for a zone.
+//
+// API reference: https://api.cloudflare.com/#logpush-jobs-logpush-job-details
+func (api *API) GetLogpushJob(ctx context.Context, rc *ResourceContainer, jobID int) (LogpushJob, error) {
+	uri := fmt.Sprintf("/%s/%s/logpush/jobs/%d", rc.Level, rc.Identifier, jobID)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return LogpushJob{}, err
+	}
+	var r LogpushJobDetailsResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return LogpushJob{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// UpdateLogpushJob lets you update a Logpush Job.
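+//
+// Example (illustrative sketch only; the zone ID, job ID, dataset and
+// destination string are placeholders, and ZoneIdentifier is assumed to be the
+// package's zone-level ResourceContainer helper):
+//
+//	err := api.UpdateLogpushJob(context.Background(), ZoneIdentifier("<zone-id>"), UpdateLogpushJobParams{
+//		ID:              123,
+//		Dataset:         "http_requests",
+//		Name:            "updated-job",
+//		Enabled:         true,
+//		DestinationConf: "s3://<bucket>?region=us-west-2",
+//	})
+//	if err != nil {
+//		// handle the error
+//	}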
+// +// API reference: https://api.cloudflare.com/#logpush-jobs-update-logpush-job +func (api *API) UpdateLogpushJob(ctx context.Context, rc *ResourceContainer, params UpdateLogpushJobParams) error { + uri := fmt.Sprintf("/%s/%s/logpush/jobs/%d", rc.Level, rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return err + } + var r LogpushJobDetailsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// DeleteLogpushJob deletes a Logpush Job for a zone. +// +// API reference: https://api.cloudflare.com/#logpush-jobs-delete-logpush-job +func (api *API) DeleteLogpushJob(ctx context.Context, rc *ResourceContainer, jobID int) error { + uri := fmt.Sprintf("/%s/%s/logpush/jobs/%d", rc.Level, rc.Identifier, jobID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r LogpushJobDetailsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// GetLogpushOwnershipChallenge returns ownership challenge. +// +// API reference: https://api.cloudflare.com/#logpush-jobs-get-ownership-challenge +func (api *API) GetLogpushOwnershipChallenge(ctx context.Context, rc *ResourceContainer, params GetLogpushOwnershipChallengeParams) (*LogpushGetOwnershipChallenge, error) { + uri := fmt.Sprintf("/%s/%s/logpush/ownership", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return nil, err + } + var r LogpushGetOwnershipChallengeResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !r.Result.Valid { + return nil, errors.New(r.Result.Message) + } + + return &r.Result, nil +} + +// ValidateLogpushOwnershipChallenge returns zone-level ownership challenge validation result. +// +// API reference: https://api.cloudflare.com/#logpush-jobs-validate-ownership-challenge +func (api *API) ValidateLogpushOwnershipChallenge(ctx context.Context, rc *ResourceContainer, params ValidateLogpushOwnershipChallengeParams) (bool, error) { + uri := fmt.Sprintf("/%s/%s/logpush/ownership/validate", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return false, err + } + var r LogpushGetOwnershipChallengeResponse + err = json.Unmarshal(res, &r) + if err != nil { + return false, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result.Valid, nil +} + +// CheckLogpushDestinationExists returns zone-level destination exists check result. 
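+//
+// Example (illustrative sketch only; the zone ID and destination string are
+// placeholders):
+//
+//	exists, err := api.CheckLogpushDestinationExists(context.Background(), ZoneIdentifier("<zone-id>"), "s3://<bucket>?region=us-west-2")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(exists)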
+// +// API reference: https://api.cloudflare.com/#logpush-jobs-check-destination-exists +func (api *API) CheckLogpushDestinationExists(ctx context.Context, rc *ResourceContainer, destinationConf string) (bool, error) { + uri := fmt.Sprintf("/%s/%s/logpush/validate/destination/exists", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, LogpushDestinationExistsRequest{ + DestinationConf: destinationConf, + }) + if err != nil { + return false, err + } + var r LogpushDestinationExistsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return false, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result.Exists, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/magic_firewall_rulesets.go b/vendor/github.com/cloudflare/cloudflare-go/magic_firewall_rulesets.go new file mode 100644 index 0000000..1851198 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/magic_firewall_rulesets.go @@ -0,0 +1,206 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +const ( + // MagicFirewallRulesetKindRoot specifies a root Ruleset. + MagicFirewallRulesetKindRoot = "root" + + // MagicFirewallRulesetPhaseMagicTransit specifies the Magic Transit Ruleset phase. + MagicFirewallRulesetPhaseMagicTransit = "magic_transit" + + // MagicFirewallRulesetRuleActionSkip specifies a skip (allow) action. + MagicFirewallRulesetRuleActionSkip MagicFirewallRulesetRuleAction = "skip" + + // MagicFirewallRulesetRuleActionBlock specifies a block action. + MagicFirewallRulesetRuleActionBlock MagicFirewallRulesetRuleAction = "block" +) + +// MagicFirewallRulesetRuleAction specifies the action for a Firewall rule. +type MagicFirewallRulesetRuleAction string + +// MagicFirewallRuleset contains information about a Firewall Ruleset. +type MagicFirewallRuleset struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Kind string `json:"kind"` + Version string `json:"version,omitempty"` + LastUpdated *time.Time `json:"last_updated,omitempty"` + Phase string `json:"phase"` + Rules []MagicFirewallRulesetRule `json:"rules"` +} + +// MagicFirewallRulesetRuleActionParameters specifies the action parameters for a Firewall rule. +type MagicFirewallRulesetRuleActionParameters struct { + Ruleset string `json:"ruleset,omitempty"` +} + +// MagicFirewallRulesetRule contains information about a single Magic Firewall rule. +type MagicFirewallRulesetRule struct { + ID string `json:"id,omitempty"` + Version string `json:"version,omitempty"` + Action MagicFirewallRulesetRuleAction `json:"action"` + ActionParameters *MagicFirewallRulesetRuleActionParameters `json:"action_parameters,omitempty"` + Expression string `json:"expression"` + Description string `json:"description"` + LastUpdated *time.Time `json:"last_updated,omitempty"` + Ref string `json:"ref,omitempty"` + Enabled bool `json:"enabled"` +} + +// CreateMagicFirewallRulesetRequest contains data for a new Firewall ruleset. +type CreateMagicFirewallRulesetRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Kind string `json:"kind"` + Phase string `json:"phase"` + Rules []MagicFirewallRulesetRule `json:"rules"` +} + +// UpdateMagicFirewallRulesetRequest contains data for a Magic Firewall ruleset update. 
+type UpdateMagicFirewallRulesetRequest struct { + Description string `json:"description"` + Rules []MagicFirewallRulesetRule `json:"rules"` +} + +// ListMagicFirewallRulesetResponse contains a list of Magic Firewall rulesets. +type ListMagicFirewallRulesetResponse struct { + Response + Result []MagicFirewallRuleset `json:"result"` +} + +// GetMagicFirewallRulesetResponse contains a single Magic Firewall Ruleset. +type GetMagicFirewallRulesetResponse struct { + Response + Result MagicFirewallRuleset `json:"result"` +} + +// CreateMagicFirewallRulesetResponse contains response data when creating a new Magic Firewall ruleset. +type CreateMagicFirewallRulesetResponse struct { + Response + Result MagicFirewallRuleset `json:"result"` +} + +// UpdateMagicFirewallRulesetResponse contains response data when updating an existing Magic Firewall ruleset. +type UpdateMagicFirewallRulesetResponse struct { + Response + Result MagicFirewallRuleset `json:"result"` +} + +// ListMagicFirewallRulesets lists all Rulesets for a given account +// +// API reference: https://api.cloudflare.com/#rulesets-list-rulesets +// +// Deprecated: Use `ListZoneRuleset` or `ListAccountRuleset` instead. +func (api *API) ListMagicFirewallRulesets(ctx context.Context, accountID string) ([]MagicFirewallRuleset, error) { + uri := fmt.Sprintf("/accounts/%s/rulesets", accountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []MagicFirewallRuleset{}, err + } + + result := ListMagicFirewallRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicFirewallRuleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetMagicFirewallRuleset returns a specific Magic Firewall Ruleset +// +// API reference: https://api.cloudflare.com/#rulesets-get-a-ruleset +// +// Deprecated: Use `GetZoneRuleset` or `GetAccountRuleset` instead. +func (api *API) GetMagicFirewallRuleset(ctx context.Context, accountID, ID string) (MagicFirewallRuleset, error) { + uri := fmt.Sprintf("/accounts/%s/rulesets/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return MagicFirewallRuleset{}, err + } + + result := GetMagicFirewallRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicFirewallRuleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// CreateMagicFirewallRuleset creates a Magic Firewall ruleset +// +// API reference: https://api.cloudflare.com/#rulesets-list-rulesets +// +// Deprecated: Use `CreateZoneRuleset` or `CreateAccountRuleset` instead. 
+func (api *API) CreateMagicFirewallRuleset(ctx context.Context, accountID, name, description string, rules []MagicFirewallRulesetRule) (MagicFirewallRuleset, error) { + uri := fmt.Sprintf("/accounts/%s/rulesets", accountID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, + CreateMagicFirewallRulesetRequest{ + Name: name, + Description: description, + Kind: MagicFirewallRulesetKindRoot, + Phase: MagicFirewallRulesetPhaseMagicTransit, + Rules: rules}) + if err != nil { + return MagicFirewallRuleset{}, err + } + + result := CreateMagicFirewallRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicFirewallRuleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteMagicFirewallRuleset deletes a Magic Firewall ruleset +// +// API reference: https://api.cloudflare.com/#rulesets-delete-ruleset +// +// Deprecated: Use `DeleteZoneRuleset` or `DeleteAccountRuleset` instead. +func (api *API) DeleteMagicFirewallRuleset(ctx context.Context, accountID, ID string) error { + uri := fmt.Sprintf("/accounts/%s/rulesets/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return err + } + + // Firewall API is not implementing the standard response blob but returns an empty response (204) in case + // of a success. So we are checking for the response body size here + if len(res) > 0 { + return fmt.Errorf(errMakeRequestError+": %w", errors.New(string(res))) + } + + return nil +} + +// UpdateMagicFirewallRuleset updates a Magic Firewall ruleset +// +// API reference: https://api.cloudflare.com/#rulesets-update-ruleset +// +// Deprecated: Use `UpdateZoneRuleset` or `UpdateAccountRuleset` instead. +func (api *API) UpdateMagicFirewallRuleset(ctx context.Context, accountID, ID string, description string, rules []MagicFirewallRulesetRule) (MagicFirewallRuleset, error) { + uri := fmt.Sprintf("/accounts/%s/rulesets/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, + UpdateMagicFirewallRulesetRequest{Description: description, Rules: rules}) + if err != nil { + return MagicFirewallRuleset{}, err + } + + result := UpdateMagicFirewallRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicFirewallRuleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/magic_transit_gre_tunnel.go b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_gre_tunnel.go new file mode 100644 index 0000000..52e24d5 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_gre_tunnel.go @@ -0,0 +1,181 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// Magic Transit GRE Tunnel Error messages. +const ( + errMagicTransitGRETunnelNotModified = "When trying to modify GRE tunnel, API returned modified: false" + errMagicTransitGRETunnelNotDeleted = "When trying to delete GRE tunnel, API returned deleted: false" +) + +// MagicTransitGRETunnel contains information about a GRE tunnel. 
+type MagicTransitGRETunnel struct { + ID string `json:"id,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Name string `json:"name"` + CustomerGREEndpoint string `json:"customer_gre_endpoint"` + CloudflareGREEndpoint string `json:"cloudflare_gre_endpoint"` + InterfaceAddress string `json:"interface_address"` + Description string `json:"description,omitempty"` + TTL uint8 `json:"ttl,omitempty"` + MTU uint16 `json:"mtu,omitempty"` + HealthCheck *MagicTransitGRETunnelHealthcheck `json:"health_check,omitempty"` +} + +// MagicTransitGRETunnelHealthcheck contains information about a GRE tunnel health check. +type MagicTransitGRETunnelHealthcheck struct { + Enabled bool `json:"enabled"` + Target string `json:"target,omitempty"` + Type string `json:"type,omitempty"` +} + +// ListMagicTransitGRETunnelsResponse contains a response including GRE tunnels. +type ListMagicTransitGRETunnelsResponse struct { + Response + Result struct { + GRETunnels []MagicTransitGRETunnel `json:"gre_tunnels"` + } `json:"result"` +} + +// GetMagicTransitGRETunnelResponse contains a response including zero or one GRE tunnels. +type GetMagicTransitGRETunnelResponse struct { + Response + Result struct { + GRETunnel MagicTransitGRETunnel `json:"gre_tunnel"` + } `json:"result"` +} + +// CreateMagicTransitGRETunnelsRequest is an array of GRE tunnels to create. +type CreateMagicTransitGRETunnelsRequest struct { + GRETunnels []MagicTransitGRETunnel `json:"gre_tunnels"` +} + +// UpdateMagicTransitGRETunnelResponse contains a response after updating a GRE Tunnel. +type UpdateMagicTransitGRETunnelResponse struct { + Response + Result struct { + Modified bool `json:"modified"` + ModifiedGRETunnel MagicTransitGRETunnel `json:"modified_gre_tunnel"` + } `json:"result"` +} + +// DeleteMagicTransitGRETunnelResponse contains a response after deleting a GRE Tunnel. +type DeleteMagicTransitGRETunnelResponse struct { + Response + Result struct { + Deleted bool `json:"deleted"` + DeletedGRETunnel MagicTransitGRETunnel `json:"deleted_gre_tunnel"` + } `json:"result"` +} + +// ListMagicTransitGRETunnels lists all GRE tunnels for a given account. +// +// API reference: https://api.cloudflare.com/#magic-gre-tunnels-list-gre-tunnels +func (api *API) ListMagicTransitGRETunnels(ctx context.Context, accountID string) ([]MagicTransitGRETunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/gre_tunnels", accountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []MagicTransitGRETunnel{}, err + } + + result := ListMagicTransitGRETunnelsResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicTransitGRETunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.GRETunnels, nil +} + +// GetMagicTransitGRETunnel returns zero or one GRE tunnel. 
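+//
+// Example (illustrative sketch only; the account and tunnel IDs are placeholders):
+//
+//	tunnel, err := api.GetMagicTransitGRETunnel(context.Background(), "<account-id>", "<tunnel-id>")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(tunnel.Name)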
+// +// API reference: https://api.cloudflare.com/#magic-gre-tunnels-gre-tunnel-details +func (api *API) GetMagicTransitGRETunnel(ctx context.Context, accountID string, id string) (MagicTransitGRETunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/gre_tunnels/%s", accountID, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return MagicTransitGRETunnel{}, err + } + + result := GetMagicTransitGRETunnelResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitGRETunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.GRETunnel, nil +} + +// CreateMagicTransitGRETunnels creates one or more GRE tunnels. +// +// API reference: https://api.cloudflare.com/#magic-gre-tunnels-create-gre-tunnels +func (api *API) CreateMagicTransitGRETunnels(ctx context.Context, accountID string, tunnels []MagicTransitGRETunnel) ([]MagicTransitGRETunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/gre_tunnels", accountID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, CreateMagicTransitGRETunnelsRequest{ + GRETunnels: tunnels, + }) + + if err != nil { + return []MagicTransitGRETunnel{}, err + } + + result := ListMagicTransitGRETunnelsResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicTransitGRETunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.GRETunnels, nil +} + +// UpdateMagicTransitGRETunnel updates a GRE tunnel. +// +// API reference: https://api.cloudflare.com/#magic-gre-tunnels-update-gre-tunnel +func (api *API) UpdateMagicTransitGRETunnel(ctx context.Context, accountID string, id string, tunnel MagicTransitGRETunnel) (MagicTransitGRETunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/gre_tunnels/%s", accountID, id) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, tunnel) + + if err != nil { + return MagicTransitGRETunnel{}, err + } + + result := UpdateMagicTransitGRETunnelResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitGRETunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !result.Result.Modified { + return MagicTransitGRETunnel{}, errors.New(errMagicTransitGRETunnelNotModified) + } + + return result.Result.ModifiedGRETunnel, nil +} + +// DeleteMagicTransitGRETunnel deletes a GRE tunnel. 
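+//
+// Example (illustrative sketch only; the account and tunnel IDs are
+// placeholders, and the returned value is the tunnel that was removed):
+//
+//	deleted, err := api.DeleteMagicTransitGRETunnel(context.Background(), "<account-id>", "<tunnel-id>")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(deleted.ID)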
+// +// API reference: https://api.cloudflare.com/#magic-gre-tunnels-delete-gre-tunnel +func (api *API) DeleteMagicTransitGRETunnel(ctx context.Context, accountID string, id string) (MagicTransitGRETunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/gre_tunnels/%s", accountID, id) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return MagicTransitGRETunnel{}, err + } + + result := DeleteMagicTransitGRETunnelResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitGRETunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !result.Result.Deleted { + return MagicTransitGRETunnel{}, errors.New(errMagicTransitGRETunnelNotDeleted) + } + + return result.Result.DeletedGRETunnel, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/magic_transit_ipsec_tunnel.go b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_ipsec_tunnel.go new file mode 100644 index 0000000..6b13fe1 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_ipsec_tunnel.go @@ -0,0 +1,216 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// Magic Transit IPsec Tunnel Error messages. +const ( + errMagicTransitIPsecTunnelNotModified = "When trying to modify IPsec tunnel, API returned modified: false" + errMagicTransitIPsecTunnelNotDeleted = "When trying to delete IPsec tunnel, API returned deleted: false" +) + +type RemoteIdentities struct { + HexID string `json:"hex_id"` + FQDNID string `json:"fqdn_id"` + UserID string `json:"user_id"` +} + +// MagicTransitIPsecTunnelPskMetadata contains metadata associated with PSK. +type MagicTransitIPsecTunnelPskMetadata struct { + LastGeneratedOn *time.Time `json:"last_generated_on,omitempty"` +} + +// MagicTransitIPsecTunnel contains information about an IPsec tunnel. +type MagicTransitIPsecTunnel struct { + ID string `json:"id,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Name string `json:"name"` + CustomerEndpoint string `json:"customer_endpoint,omitempty"` + CloudflareEndpoint string `json:"cloudflare_endpoint"` + InterfaceAddress string `json:"interface_address"` + Description string `json:"description,omitempty"` + HealthCheck *MagicTransitTunnelHealthcheck `json:"health_check,omitempty"` + Psk string `json:"psk,omitempty"` + PskMetadata *MagicTransitIPsecTunnelPskMetadata `json:"psk_metadata,omitempty"` + RemoteIdentities *RemoteIdentities `json:"remote_identities,omitempty"` + AllowNullCipher bool `json:"allow_null_cipher"` + ReplayProtection *bool `json:"replay_protection,omitempty"` +} + +// ListMagicTransitIPsecTunnelsResponse contains a response including IPsec tunnels. +type ListMagicTransitIPsecTunnelsResponse struct { + Response + Result struct { + IPsecTunnels []MagicTransitIPsecTunnel `json:"ipsec_tunnels"` + } `json:"result"` +} + +// GetMagicTransitIPsecTunnelResponse contains a response including zero or one IPsec tunnels. +type GetMagicTransitIPsecTunnelResponse struct { + Response + Result struct { + IPsecTunnel MagicTransitIPsecTunnel `json:"ipsec_tunnel"` + } `json:"result"` +} + +// CreateMagicTransitIPsecTunnelsRequest is an array of IPsec tunnels to create. +type CreateMagicTransitIPsecTunnelsRequest struct { + IPsecTunnels []MagicTransitIPsecTunnel `json:"ipsec_tunnels"` +} + +// UpdateMagicTransitIPsecTunnelResponse contains a response after updating an IPsec Tunnel. 
+type UpdateMagicTransitIPsecTunnelResponse struct { + Response + Result struct { + Modified bool `json:"modified"` + ModifiedIPsecTunnel MagicTransitIPsecTunnel `json:"modified_ipsec_tunnel"` + } `json:"result"` +} + +// DeleteMagicTransitIPsecTunnelResponse contains a response after deleting an IPsec Tunnel. +type DeleteMagicTransitIPsecTunnelResponse struct { + Response + Result struct { + Deleted bool `json:"deleted"` + DeletedIPsecTunnel MagicTransitIPsecTunnel `json:"deleted_ipsec_tunnel"` + } `json:"result"` +} + +// GenerateMagicTransitIPsecTunnelPSKResponse contains a response after generating IPsec Tunnel. +type GenerateMagicTransitIPsecTunnelPSKResponse struct { + Response + Result struct { + Psk string `json:"psk"` + PskMetadata *MagicTransitIPsecTunnelPskMetadata `json:"psk_metadata"` + } `json:"result"` +} + +// ListMagicTransitIPsecTunnels lists all IPsec tunnels for a given account +// +// API reference: https://api.cloudflare.com/#magic-ipsec-tunnels-list-ipsec-tunnels +func (api *API) ListMagicTransitIPsecTunnels(ctx context.Context, accountID string) ([]MagicTransitIPsecTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/ipsec_tunnels", accountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []MagicTransitIPsecTunnel{}, err + } + + result := ListMagicTransitIPsecTunnelsResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicTransitIPsecTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.IPsecTunnels, nil +} + +// GetMagicTransitIPsecTunnel returns zero or one IPsec tunnel +// +// API reference: https://api.cloudflare.com/#magic-ipsec-tunnels-ipsec-tunnel-details +func (api *API) GetMagicTransitIPsecTunnel(ctx context.Context, accountID string, id string) (MagicTransitIPsecTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/ipsec_tunnels/%s", accountID, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return MagicTransitIPsecTunnel{}, err + } + + result := GetMagicTransitIPsecTunnelResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitIPsecTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.IPsecTunnel, nil +} + +// CreateMagicTransitIPsecTunnels creates one or more IPsec tunnels +// +// API reference: https://api.cloudflare.com/#magic-ipsec-tunnels-create-ipsec-tunnels +func (api *API) CreateMagicTransitIPsecTunnels(ctx context.Context, accountID string, tunnels []MagicTransitIPsecTunnel) ([]MagicTransitIPsecTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/ipsec_tunnels", accountID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, CreateMagicTransitIPsecTunnelsRequest{ + IPsecTunnels: tunnels, + }) + + if err != nil { + return []MagicTransitIPsecTunnel{}, err + } + + result := ListMagicTransitIPsecTunnelsResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicTransitIPsecTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.IPsecTunnels, nil +} + +// UpdateMagicTransitIPsecTunnel updates an IPsec tunnel +// +// API reference: https://api.cloudflare.com/#magic-ipsec-tunnels-update-ipsec-tunnel +func (api *API) UpdateMagicTransitIPsecTunnel(ctx context.Context, accountID string, id string, tunnel MagicTransitIPsecTunnel) (MagicTransitIPsecTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/ipsec_tunnels/%s", accountID, id) + res, err := api.makeRequestContext(ctx, 
http.MethodPut, uri, tunnel) + + if err != nil { + return MagicTransitIPsecTunnel{}, err + } + + result := UpdateMagicTransitIPsecTunnelResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitIPsecTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !result.Result.Modified { + return MagicTransitIPsecTunnel{}, errors.New(errMagicTransitIPsecTunnelNotModified) + } + + return result.Result.ModifiedIPsecTunnel, nil +} + +// DeleteMagicTransitIPsecTunnel deletes an IPsec Tunnel +// +// API reference: https://api.cloudflare.com/#magic-ipsec-tunnels-delete-ipsec-tunnel +func (api *API) DeleteMagicTransitIPsecTunnel(ctx context.Context, accountID string, id string) (MagicTransitIPsecTunnel, error) { + uri := fmt.Sprintf("/accounts/%s/magic/ipsec_tunnels/%s", accountID, id) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return MagicTransitIPsecTunnel{}, err + } + + result := DeleteMagicTransitIPsecTunnelResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitIPsecTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !result.Result.Deleted { + return MagicTransitIPsecTunnel{}, errors.New(errMagicTransitIPsecTunnelNotDeleted) + } + + return result.Result.DeletedIPsecTunnel, nil +} + +// GenerateMagicTransitIPsecTunnelPSK generates a pre shared key (psk) for an IPsec tunnel +// +// API reference: https://api.cloudflare.com/#magic-ipsec-tunnels-generate-pre-shared-key-psk-for-ipsec-tunnels +func (api *API) GenerateMagicTransitIPsecTunnelPSK(ctx context.Context, accountID string, id string) (string, *MagicTransitIPsecTunnelPskMetadata, error) { + uri := fmt.Sprintf("/accounts/%s/magic/ipsec_tunnels/%s/psk_generate", accountID, id) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + + if err != nil { + return "", nil, err + } + + result := GenerateMagicTransitIPsecTunnelPSKResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return "", nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.Psk, result.Result.PskMetadata, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/magic_transit_static_routes.go b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_static_routes.go new file mode 100644 index 0000000..3554a9d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_static_routes.go @@ -0,0 +1,180 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// Magic Transit Static Routes Error messages. +const ( + errMagicTransitStaticRouteNotModified = "When trying to modify static route, API returned modified: false" + errMagicTransitStaticRouteNotDeleted = "When trying to delete static route, API returned deleted: false" +) + +// MagicTransitStaticRouteScope contains information about a static route's scope. +type MagicTransitStaticRouteScope struct { + ColoRegions []string `json:"colo_regions,omitempty"` + ColoNames []string `json:"colo_names,omitempty"` +} + +// MagicTransitStaticRoute contains information about a static route. 
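+//
+// An illustrative sketch of announcing a prefix with CreateMagicTransitStaticRoute
+// (defined below), assuming an authenticated *API client "api", a context "ctx", a
+// placeholder accountID, and documentation-range prefix/nexthop values:
+//
+//	routes, err := api.CreateMagicTransitStaticRoute(ctx, accountID, cloudflare.MagicTransitStaticRoute{
+//		Prefix:      "192.0.2.0/24",
+//		Nexthop:     "203.0.113.1",
+//		Priority:    100,
+//		Description: "example route",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("created %d route(s)\n", len(routes))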
+type MagicTransitStaticRoute struct { + ID string `json:"id,omitempty"` + Prefix string `json:"prefix"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Nexthop string `json:"nexthop"` + Priority int `json:"priority,omitempty"` + Description string `json:"description,omitempty"` + Weight int `json:"weight,omitempty"` + Scope MagicTransitStaticRouteScope `json:"scope,omitempty"` +} + +// ListMagicTransitStaticRoutesResponse contains a response including Magic Transit static routes. +type ListMagicTransitStaticRoutesResponse struct { + Response + Result struct { + Routes []MagicTransitStaticRoute `json:"routes"` + } `json:"result"` +} + +// GetMagicTransitStaticRouteResponse contains a response including exactly one static route. +type GetMagicTransitStaticRouteResponse struct { + Response + Result struct { + Route MagicTransitStaticRoute `json:"route"` + } `json:"result"` +} + +// UpdateMagicTransitStaticRouteResponse contains a static route update response. +type UpdateMagicTransitStaticRouteResponse struct { + Response + Result struct { + Modified bool `json:"modified"` + ModifiedRoute MagicTransitStaticRoute `json:"modified_route"` + } `json:"result"` +} + +// DeleteMagicTransitStaticRouteResponse contains a static route deletion response. +type DeleteMagicTransitStaticRouteResponse struct { + Response + Result struct { + Deleted bool `json:"deleted"` + DeletedRoute MagicTransitStaticRoute `json:"deleted_route"` + } `json:"result"` +} + +// CreateMagicTransitStaticRoutesRequest is an array of static routes to create. +type CreateMagicTransitStaticRoutesRequest struct { + Routes []MagicTransitStaticRoute `json:"routes"` +} + +// ListMagicTransitStaticRoutes lists all static routes for a given account +// +// API reference: https://api.cloudflare.com/#magic-transit-static-routes-list-routes +func (api *API) ListMagicTransitStaticRoutes(ctx context.Context, accountID string) ([]MagicTransitStaticRoute, error) { + uri := fmt.Sprintf("/accounts/%s/magic/routes", accountID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []MagicTransitStaticRoute{}, err + } + + result := ListMagicTransitStaticRoutesResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicTransitStaticRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.Routes, nil +} + +// GetMagicTransitStaticRoute returns exactly one static route +// +// API reference: https://api.cloudflare.com/#magic-transit-static-routes-route-details +func (api *API) GetMagicTransitStaticRoute(ctx context.Context, accountID, ID string) (MagicTransitStaticRoute, error) { + uri := fmt.Sprintf("/accounts/%s/magic/routes/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return MagicTransitStaticRoute{}, err + } + + result := GetMagicTransitStaticRouteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitStaticRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.Route, nil +} + +// CreateMagicTransitStaticRoute creates a new static route +// +// API reference: https://api.cloudflare.com/#magic-transit-static-routes-create-routes +func (api *API) CreateMagicTransitStaticRoute(ctx context.Context, accountID string, route MagicTransitStaticRoute) ([]MagicTransitStaticRoute, error) { + uri := fmt.Sprintf("/accounts/%s/magic/routes", accountID) + res, err := 
api.makeRequestContext(ctx, http.MethodPost, uri, CreateMagicTransitStaticRoutesRequest{ + Routes: []MagicTransitStaticRoute{ + route, + }, + }) + + if err != nil { + return []MagicTransitStaticRoute{}, err + } + + result := ListMagicTransitStaticRoutesResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []MagicTransitStaticRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.Routes, nil +} + +// UpdateMagicTransitStaticRoute updates a static route +// +// API reference: https://api.cloudflare.com/#magic-transit-static-routes-update-route +func (api *API) UpdateMagicTransitStaticRoute(ctx context.Context, accountID, ID string, route MagicTransitStaticRoute) (MagicTransitStaticRoute, error) { + uri := fmt.Sprintf("/accounts/%s/magic/routes/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, route) + + if err != nil { + return MagicTransitStaticRoute{}, err + } + + result := UpdateMagicTransitStaticRouteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitStaticRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !result.Result.Modified { + return MagicTransitStaticRoute{}, errors.New(errMagicTransitStaticRouteNotModified) + } + + return result.Result.ModifiedRoute, nil +} + +// DeleteMagicTransitStaticRoute deletes a static route +// +// API reference: https://api.cloudflare.com/#magic-transit-static-routes-delete-route +func (api *API) DeleteMagicTransitStaticRoute(ctx context.Context, accountID, ID string) (MagicTransitStaticRoute, error) { + uri := fmt.Sprintf("/accounts/%s/magic/routes/%s", accountID, ID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return MagicTransitStaticRoute{}, err + } + + result := DeleteMagicTransitStaticRouteResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return MagicTransitStaticRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !result.Result.Deleted { + return MagicTransitStaticRoute{}, errors.New(errMagicTransitStaticRouteNotDeleted) + } + + return result.Result.DeletedRoute, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/magic_transit_tunnel_healthcheck.go b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_tunnel_healthcheck.go new file mode 100644 index 0000000..ced8a40 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/magic_transit_tunnel_healthcheck.go @@ -0,0 +1,10 @@ +package cloudflare + +// MagicTransitTunnelHealthcheck contains information about a tunnel health check. 
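+//
+// An illustrative sketch of attaching a health check when creating an IPsec tunnel.
+// All values are placeholders; fields left unset carry omitempty tags and are
+// simply omitted from the request:
+//
+//	tunnel := cloudflare.MagicTransitIPsecTunnel{
+//		Name:               "example-tunnel",
+//		CloudflareEndpoint: "203.0.113.10",
+//		InterfaceAddress:   "192.0.2.0/31",
+//		HealthCheck: &cloudflare.MagicTransitTunnelHealthcheck{
+//			Enabled: true,
+//			Target:  "203.0.113.1",
+//		},
+//	}
+//	_, err := api.CreateMagicTransitIPsecTunnels(ctx, accountID, []cloudflare.MagicTransitIPsecTunnel{tunnel})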
+type MagicTransitTunnelHealthcheck struct { + Enabled bool `json:"enabled"` + Target string `json:"target,omitempty"` + Type string `json:"type,omitempty"` + Rate string `json:"rate,omitempty"` + Direction string `json:"direction,omitempty"` +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/managed_headers.go b/vendor/github.com/cloudflare/cloudflare-go/managed_headers.go new file mode 100644 index 0000000..54de0dd --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/managed_headers.go @@ -0,0 +1,78 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type ListManagedHeadersResponse struct { + Response + Result ManagedHeaders `json:"result"` +} + +type UpdateManagedHeadersParams struct { + ManagedHeaders +} + +type ManagedHeaders struct { + ManagedRequestHeaders []ManagedHeader `json:"managed_request_headers"` + ManagedResponseHeaders []ManagedHeader `json:"managed_response_headers"` +} + +type ManagedHeader struct { + ID string `json:"id"` + Enabled bool `json:"enabled"` + HasCoflict bool `json:"has_conflict,omitempty"` + ConflictsWith []string `json:"conflicts_with,omitempty"` +} + +type ListManagedHeadersParams struct { + Status string `url:"status,omitempty"` +} + +func (api *API) ListZoneManagedHeaders(ctx context.Context, rc *ResourceContainer, params ListManagedHeadersParams) (ManagedHeaders, error) { + if rc.Identifier == "" { + return ManagedHeaders{}, ErrMissingZoneID + } + + uri := buildURI(fmt.Sprintf("/zones/%s/managed_headers", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ManagedHeaders{}, err + } + + result := ListManagedHeadersResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ManagedHeaders{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +func (api *API) UpdateZoneManagedHeaders(ctx context.Context, rc *ResourceContainer, params UpdateManagedHeadersParams) (ManagedHeaders, error) { + if rc.Identifier == "" { + return ManagedHeaders{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/managed_headers", rc.Identifier) + + payload, err := json.Marshal(params.ManagedHeaders) + if err != nil { + return ManagedHeaders{}, err + } + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, payload) + if err != nil { + return ManagedHeaders{}, err + } + + result := ListManagedHeadersResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return ManagedHeaders{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/miscategorization.go b/vendor/github.com/cloudflare/cloudflare-go/miscategorization.go new file mode 100644 index 0000000..5164c53 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/miscategorization.go @@ -0,0 +1,50 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" +) + +var ( + // ErrMissingIP is for when ipv4 or ipv6 indicator was given but ip is missing. + ErrMissingIP = errors.New("ip is required when using 'ipv4' or 'ipv6' indicator type and is missing") + // ErrMissingURL is for when url or domain indicator was given but url is missing. + ErrMissingURL = errors.New("url is required when using 'domain' or 'url' indicator type and is missing") +) + +// MisCategorizationParameters represents the parameters for a miscategorization request. 
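+//
+// An illustrative sketch of reporting a miscategorized URL with
+// CreateMiscategorization (defined below); the account ID and the category ID in
+// ContentAdds are placeholders:
+//
+//	err := api.CreateMiscategorization(ctx, cloudflare.MisCategorizationParameters{
+//		AccountID:     accountID,
+//		IndicatorType: "url",
+//		URL:           "https://example.com/some/page",
+//		ContentAdds:   []int{82}, // placeholder content category ID
+//	})
+//	if err != nil {
+//		return err
+//	}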
+type MisCategorizationParameters struct { + AccountID string + IndicatorType string `json:"indicator_type,omitempty"` + IP string `json:"ip,omitempty"` + URL string `json:"url,omitempty"` + ContentAdds []int `json:"content_adds,omitempty"` + ContentRemoves []int `json:"content_removes,omitempty"` + SecurityAdds []int `json:"security_adds,omitempty"` + SecurityRemoves []int `json:"security_removes,omitempty"` +} + +// CreateMiscategorization creates a miscatergorization. +// +// API Reference: https://api.cloudflare.com/#miscategorization-create-miscategorization +func (api *API) CreateMiscategorization(ctx context.Context, params MisCategorizationParameters) error { + if params.AccountID == "" { + return ErrMissingAccountID + } + if (params.IndicatorType == "ipv6" || params.IndicatorType == "ipv4") && params.IP == "" { + return ErrMissingIP + } + if (params.IndicatorType == "domain" || params.IndicatorType == "url") && params.URL == "" { + return ErrMissingURL + } + + uri := fmt.Sprintf("/accounts/%s/intel/miscategorization", params.AccountID) + _, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/mtls_certificates.go b/vendor/github.com/cloudflare/cloudflare-go/mtls_certificates.go new file mode 100644 index 0000000..4ab8794 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/mtls_certificates.go @@ -0,0 +1,215 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// MTLSAssociation represents the metadata for an existing association +// between a user-uploaded mTLS certificate and a Cloudflare service. +type MTLSAssociation struct { + Service string `json:"service"` + Status string `json:"status"` +} + +// MTLSAssociationResponse represents the response from the retrieval endpoint +// for mTLS certificate associations. +type MTLSAssociationResponse struct { + Response + Result []MTLSAssociation `json:"result"` +} + +// MTLSCertificate represents the metadata for a user-uploaded mTLS +// certificate. +type MTLSCertificate struct { + ID string `json:"id"` + Name string `json:"name"` + Issuer string `json:"issuer"` + Signature string `json:"signature"` + SerialNumber string `json:"serial_number"` + Certificates string `json:"certificates"` + CA bool `json:"ca"` + UploadedOn time.Time `json:"uploaded_on"` + UpdatedAt time.Time `json:"updated_at"` + ExpiresOn time.Time `json:"expires_on"` +} + +// MTLSCertificateResponse represents the response from endpoints relating to +// retrieving, creating, and deleting an mTLS certificate. +type MTLSCertificateResponse struct { + Response + Result MTLSCertificate `json:"result"` +} + +// MTLSCertificatesResponse represents the response from the mTLS certificate +// list endpoint. +type MTLSCertificatesResponse struct { + Response + Result []MTLSCertificate `json:"result"` + ResultInfo `json:"result_info"` +} + +// MTLSCertificateParams represents the data related to the mTLS certificate +// being uploaded. Name is an optional field. 
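+//
+// An illustrative upload sketch using CreateMTLSCertificate (defined below). It
+// assumes the AccountIdentifier helper from resource.go, an authenticated *API
+// client "api", and placeholder PEM material in certPEM and keyPEM:
+//
+//	cert, err := api.CreateMTLSCertificate(ctx, cloudflare.AccountIdentifier(accountID), cloudflare.CreateMTLSCertificateParams{
+//		Name:         "example-client-cert",
+//		Certificates: certPEM, // PEM-encoded certificate (placeholder)
+//		PrivateKey:   keyPEM,  // PEM-encoded private key (placeholder)
+//		CA:           false,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("uploaded certificate", cert.ID)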
+type CreateMTLSCertificateParams struct { + Name string `json:"name"` + Certificates string `json:"certificates"` + PrivateKey string `json:"private_key"` + CA bool `json:"ca"` +} + +type ListMTLSCertificatesParams struct { + PaginationOptions + Limit int `url:"limit,omitempty"` + Offset int `url:"offset,omitempty"` + Name string `url:"name,omitempty"` + CA bool `url:"ca,omitempty"` +} + +type ListMTLSCertificateAssociationsParams struct { + CertificateID string +} + +var ( + ErrMissingCertificateID = errors.New("missing required certificate ID") +) + +// ListMTLSCertificates returns a list of all user-uploaded mTLS certificates. +// +// API reference: https://api.cloudflare.com/#mtls-certificate-management-list-mtls-certificates +func (api *API) ListMTLSCertificates(ctx context.Context, rc *ResourceContainer, params ListMTLSCertificatesParams) ([]MTLSCertificate, ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return []MTLSCertificate{}, ResultInfo{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return []MTLSCertificate{}, ResultInfo{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/mtls_certificates", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, params) + if err != nil { + return []MTLSCertificate{}, ResultInfo{}, err + } + var r MTLSCertificatesResponse + if err := json.Unmarshal(res, &r); err != nil { + return []MTLSCertificate{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, r.ResultInfo, err +} + +// GetMTLSCertificate returns the metadata associated with a user-uploaded mTLS +// certificate. +// +// API reference: https://api.cloudflare.com/#mtls-certificate-management-get-mtls-certificate +func (api *API) GetMTLSCertificate(ctx context.Context, rc *ResourceContainer, certificateID string) (MTLSCertificate, error) { + if rc.Level != AccountRouteLevel { + return MTLSCertificate{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return MTLSCertificate{}, ErrMissingAccountID + } + + if certificateID == "" { + return MTLSCertificate{}, ErrMissingCertificateID + } + + uri := fmt.Sprintf("/accounts/%s/mtls_certificates/%s", rc.Identifier, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return MTLSCertificate{}, err + } + var r MTLSCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return MTLSCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListMTLSCertificateAssociations returns a list of all existing associations +// between the mTLS certificate and Cloudflare services. 
+// +// API reference: https://api.cloudflare.com/#mtls-certificate-management-list-mtls-certificate-associations +func (api *API) ListMTLSCertificateAssociations(ctx context.Context, rc *ResourceContainer, params ListMTLSCertificateAssociationsParams) ([]MTLSAssociation, error) { + if rc.Level != AccountRouteLevel { + return []MTLSAssociation{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return []MTLSAssociation{}, ErrMissingAccountID + } + + if params.CertificateID == "" { + return []MTLSAssociation{}, ErrMissingCertificateID + } + + uri := fmt.Sprintf("/accounts/%s/mtls_certificates/%s/associations", rc.Identifier, params.CertificateID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []MTLSAssociation{}, err + } + var r MTLSAssociationResponse + if err := json.Unmarshal(res, &r); err != nil { + return []MTLSAssociation{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateMTLSCertificate will create the provided certificate for use with mTLS +// enabled Cloudflare services. +// +// API reference: https://api.cloudflare.com/#mtls-certificate-management-upload-mtls-certificate +func (api *API) CreateMTLSCertificate(ctx context.Context, rc *ResourceContainer, params CreateMTLSCertificateParams) (MTLSCertificate, error) { + if rc.Level != AccountRouteLevel { + return MTLSCertificate{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return MTLSCertificate{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/mtls_certificates", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return MTLSCertificate{}, err + } + var r MTLSCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return MTLSCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteMTLSCertificate will delete the specified mTLS certificate. +// +// API reference: https://api.cloudflare.com/#mtls-certificate-management-delete-mtls-certificate +func (api *API) DeleteMTLSCertificate(ctx context.Context, rc *ResourceContainer, certificateID string) (MTLSCertificate, error) { + if rc.Level != AccountRouteLevel { + return MTLSCertificate{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return MTLSCertificate{}, ErrMissingAccountID + } + + if certificateID == "" { + return MTLSCertificate{}, ErrMissingCertificateID + } + + uri := fmt.Sprintf("/accounts/%s/mtls_certificates/%s", rc.Identifier, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return MTLSCertificate{}, err + } + var r MTLSCertificateResponse + if err := json.Unmarshal(res, &r); err != nil { + return MTLSCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/notifications.go b/vendor/github.com/cloudflare/cloudflare-go/notifications.go new file mode 100644 index 0000000..7afd66f --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/notifications.go @@ -0,0 +1,447 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// NotificationMechanismData holds a single public facing mechanism data +// integation. 
+type NotificationMechanismData struct { + Name string `json:"name"` + ID string `json:"id"` +} + +// NotificationMechanismIntegrations is a list of all the integrations of a +// certain mechanism type e.g. all email integrations. +type NotificationMechanismIntegrations []NotificationMechanismData + +// NotificationPolicy represents the notification policy created along with +// the destinations. +type NotificationPolicy struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Enabled bool `json:"enabled"` + AlertType string `json:"alert_type"` + Mechanisms map[string]NotificationMechanismIntegrations `json:"mechanisms"` + Created time.Time `json:"created"` + Modified time.Time `json:"modified"` + Conditions map[string]interface{} `json:"conditions"` + Filters map[string][]string `json:"filters"` +} + +// NotificationPoliciesResponse holds the response for listing all +// notification policies for an account. +type NotificationPoliciesResponse struct { + Response + ResultInfo + Result []NotificationPolicy +} + +// NotificationPolicyResponse holds the response type when a single policy +// is retrieved. +type NotificationPolicyResponse struct { + Response + Result NotificationPolicy +} + +// NotificationWebhookIntegration describes the webhook information along +// with its status. +type NotificationWebhookIntegration struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + URL string `json:"url"` + CreatedAt time.Time `json:"created_at"` + LastSuccess *time.Time `json:"last_success"` + LastFailure *time.Time `json:"last_failure"` +} + +// NotificationWebhookResponse describes a single webhook retrieved. +type NotificationWebhookResponse struct { + Response + ResultInfo + Result NotificationWebhookIntegration +} + +// NotificationWebhooksResponse describes a list of webhooks retrieved. +type NotificationWebhooksResponse struct { + Response + ResultInfo + Result []NotificationWebhookIntegration +} + +// NotificationUpsertWebhooks describes a valid webhook request. +type NotificationUpsertWebhooks struct { + Name string `json:"name"` + URL string `json:"url"` + Secret string `json:"secret"` +} + +// NotificationPagerDutyResource describes a PagerDuty integration. +type NotificationPagerDutyResource struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// NotificationPagerDutyResponse describes the PagerDuty integration +// retrieved. +type NotificationPagerDutyResponse struct { + Response + ResultInfo + Result NotificationPagerDutyResource +} + +// NotificationResource describes the id of an inserted/updated/deleted +// resource. +type NotificationResource struct { + ID string +} + +// SaveResponse is returned when a resource is inserted/updated/deleted. +type SaveResponse struct { + Response + Result NotificationResource +} + +// NotificationMechanismMetaData represents the state of the delivery +// mechanism. +type NotificationMechanismMetaData struct { + Eligible bool `json:"eligible"` + Ready bool `json:"ready"` + Type string `json:"type"` +} + +// NotificationMechanisms are the different possible delivery mechanisms. +type NotificationMechanisms struct { + Email NotificationMechanismMetaData `json:"email"` + PagerDuty NotificationMechanismMetaData `json:"pagerduty"` + Webhooks NotificationMechanismMetaData `json:"webhooks,omitempty"` +} + +// NotificationEligibilityResponse describes the eligible mechanisms that +// can be configured for a notification. 
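+//
+// An illustrative sketch of checking whether webhooks may be used as a delivery
+// mechanism before creating a policy, assuming an authenticated *API client "api"
+// and a placeholder accountID:
+//
+//	elig, err := api.GetEligibleNotificationDestinations(ctx, accountID)
+//	if err != nil {
+//		return err
+//	}
+//	if !elig.Result.Webhooks.Eligible {
+//		return fmt.Errorf("webhooks are not available on this account")
+//	}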
+type NotificationEligibilityResponse struct { + Response + Result NotificationMechanisms +} + +// NotificationsGroupedByProduct are grouped by products. +type NotificationsGroupedByProduct map[string][]NotificationAlertWithDescription + +// NotificationAlertWithDescription represents the alert/notification +// available. +type NotificationAlertWithDescription struct { + DisplayName string `json:"display_name"` + Type string `json:"type"` + Description string `json:"description"` +} + +// NotificationAvailableAlertsResponse describes the available +// alerts/notifications grouped by products. +type NotificationAvailableAlertsResponse struct { + Response + Result NotificationsGroupedByProduct +} + +// NotificationHistory describes the history +// of notifications sent for an account. +type NotificationHistory struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AlertBody string `json:"alert_body"` + AlertType string `json:"alert_type"` + Mechanism string `json:"mechanism"` + MechanismType string `json:"mechanism_type"` + Sent time.Time `json:"sent"` +} + +// NotificationHistoryResponse describes the notification history +// response for an account for a specific time period. +type NotificationHistoryResponse struct { + Response + ResultInfo `json:"result_info"` + Result []NotificationHistory +} + +// ListNotificationPolicies will return the notification policies +// created by a user for a specific account. +// +// API Reference: https://api.cloudflare.com/#notification-policies-properties +func (api *API) ListNotificationPolicies(ctx context.Context, accountID string) (NotificationPoliciesResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/policies", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationPoliciesResponse{}, err + } + var r NotificationPoliciesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// GetNotificationPolicy returns a specific created by a user, given the account +// id and the policy id. +// +// API Reference: https://api.cloudflare.com/#notification-policies-properties +func (api *API) GetNotificationPolicy(ctx context.Context, accountID, policyID string) (NotificationPolicyResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/policies/%s", accountID, policyID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationPolicyResponse{}, err + } + var r NotificationPolicyResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// CreateNotificationPolicy creates a notification policy for an account. +// +// API Reference: https://api.cloudflare.com/#notification-policies-create-notification-policy +func (api *API) CreateNotificationPolicy(ctx context.Context, accountID string, policy NotificationPolicy) (SaveResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/policies", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, baseURL, policy) + if err != nil { + return SaveResponse{}, err + } + return unmarshalNotificationSaveResponse(res) +} + +// UpdateNotificationPolicy updates a notification policy, given the +// account id and the policy id and returns the policy id. 
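+//
+// An illustrative read-modify-write sketch, assuming an authenticated *API client
+// "api" and placeholder accountID and policyID strings:
+//
+//	res, err := api.GetNotificationPolicy(ctx, accountID, policyID)
+//	if err != nil {
+//		return err
+//	}
+//	policy := res.Result
+//	policy.Enabled = false
+//	saved, err := api.UpdateNotificationPolicy(ctx, accountID, &policy)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("updated policy", saved.Result.ID)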
+// +// API Reference: https://api.cloudflare.com/#notification-policies-update-notification-policy +func (api *API) UpdateNotificationPolicy(ctx context.Context, accountID string, policy *NotificationPolicy) (SaveResponse, error) { + if policy == nil { + return SaveResponse{}, fmt.Errorf("policy cannot be nil") + } + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/policies/%s", accountID, policy.ID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, baseURL, policy) + if err != nil { + return SaveResponse{}, err + } + return unmarshalNotificationSaveResponse(res) +} + +// DeleteNotificationPolicy deletes a notification policy for an account. +// +// API Reference: https://api.cloudflare.com/#notification-policies-delete-notification-policy +func (api *API) DeleteNotificationPolicy(ctx context.Context, accountID, policyID string) (SaveResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/policies/%s", accountID, policyID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, baseURL, nil) + if err != nil { + return SaveResponse{}, err + } + return unmarshalNotificationSaveResponse(res) +} + +// ListNotificationWebhooks will return the webhook destinations configured +// for an account. +// +// API Reference: https://api.cloudflare.com/#notification-webhooks-list-webhooks +func (api *API) ListNotificationWebhooks(ctx context.Context, accountID string) (NotificationWebhooksResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/webhooks", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationWebhooksResponse{}, err + } + var r NotificationWebhooksResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// CreateNotificationWebhooks will help connect a webhooks destination. +// A test message will be sent to the webhooks endpoint during creation. +// If added successfully, the webhooks can be setup as a destination mechanism +// while creating policies. +// +// Notifications will be posted to this URL. +// +// API Reference: https://api.cloudflare.com/#notification-webhooks-create-webhook +func (api *API) CreateNotificationWebhooks(ctx context.Context, accountID string, webhooks *NotificationUpsertWebhooks) (SaveResponse, error) { + if webhooks == nil { + return SaveResponse{}, fmt.Errorf("webhooks cannot be nil") + } + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/webhooks", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, baseURL, webhooks) + if err != nil { + return SaveResponse{}, err + } + + return unmarshalNotificationSaveResponse(res) +} + +// GetNotificationWebhooks will return a specific webhook destination, +// given the account and webhooks ids. +// +// API Reference: https://api.cloudflare.com/#notification-webhooks-get-webhook +func (api *API) GetNotificationWebhooks(ctx context.Context, accountID, webhookID string) (NotificationWebhookResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/webhooks/%s", accountID, webhookID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationWebhookResponse{}, err + } + var r NotificationWebhookResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// UpdateNotificationWebhooks will update a particular webhook's name, +// given the account and webhooks ids. 
+// +// The webhook url and secret cannot be updated. +// +// API Reference: https://api.cloudflare.com/#notification-webhooks-update-webhook +func (api *API) UpdateNotificationWebhooks(ctx context.Context, accountID, webhookID string, webhooks *NotificationUpsertWebhooks) (SaveResponse, error) { + if webhooks == nil { + return SaveResponse{}, fmt.Errorf("webhooks cannot be nil") + } + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/webhooks/%s", accountID, webhookID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, baseURL, webhooks) + if err != nil { + return SaveResponse{}, err + } + + return unmarshalNotificationSaveResponse(res) +} + +// DeleteNotificationWebhooks will delete a webhook, given the account and +// webhooks ids. Deleting the webhooks will remove it from any connected +// notification policies. +// +// API Reference: https://api.cloudflare.com/#notification-webhooks-delete-webhook +func (api *API) DeleteNotificationWebhooks(ctx context.Context, accountID, webhookID string) (SaveResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/webhooks/%s", accountID, webhookID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, baseURL, nil) + if err != nil { + return SaveResponse{}, err + } + + return unmarshalNotificationSaveResponse(res) +} + +// ListPagerDutyNotificationDestinations will return the pagerduty +// destinations configured for an account. +// +// API Reference: https://api.cloudflare.com/#notification-destinations-with-pagerduty-list-pagerduty-services +func (api *API) ListPagerDutyNotificationDestinations(ctx context.Context, accountID string) (NotificationPagerDutyResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/pagerduty", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationPagerDutyResponse{}, err + } + var r NotificationPagerDutyResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// GetEligibleNotificationDestinations will return the types of +// destinations an account is eligible to configure. +// +// API Reference: https://api.cloudflare.com/#notification-mechanism-eligibility-properties +func (api *API) GetEligibleNotificationDestinations(ctx context.Context, accountID string) (NotificationEligibilityResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/destinations/eligible", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationEligibilityResponse{}, err + } + var r NotificationEligibilityResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// GetAvailableNotificationTypes will return the alert types available for +// a given account. +// +// API Reference: https://api.cloudflare.com/#notification-mechanism-eligibility-properties +func (api *API) GetAvailableNotificationTypes(ctx context.Context, accountID string) (NotificationAvailableAlertsResponse, error) { + baseURL := fmt.Sprintf("/accounts/%s/alerting/v3/available_alerts", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, baseURL, nil) + if err != nil { + return NotificationAvailableAlertsResponse{}, err + } + var r NotificationAvailableAlertsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} + +// TimeRange is an object for filtering the alert history based on timestamp. 
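+//
+// An illustrative sketch of listing the last 24 hours of alert history with
+// ListNotificationHistory (defined below); the RFC 3339 timestamp format is an
+// assumption:
+//
+//	filter := cloudflare.AlertHistoryFilter{
+//		TimeRange: cloudflare.TimeRange{
+//			Since:  time.Now().Add(-24 * time.Hour).Format(time.RFC3339),
+//			Before: time.Now().Format(time.RFC3339),
+//		},
+//	}
+//	history, _, err := api.ListNotificationHistory(ctx, accountID, filter)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("%d alerts sent\n", len(history))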
+type TimeRange struct { + Since string `json:"since,omitempty" url:"since,omitempty"` + Before string `json:"before,omitempty" url:"before,omitempty"` +} + +// AlertHistoryFilter is an object for filtering the alert history response from the api. +type AlertHistoryFilter struct { + TimeRange + PaginationOptions +} + +// ListNotificationHistory will return the history of alerts sent for +// a given account. The time period varies based on zone plan. +// Free, Biz, Pro = 30 days +// Ent = 90 days +// +// API Reference: https://api.cloudflare.com/#notification-history-list-history +func (api *API) ListNotificationHistory(ctx context.Context, accountID string, alertHistoryFilter AlertHistoryFilter) ([]NotificationHistory, ResultInfo, error) { + uri := buildURI(fmt.Sprintf("/accounts/%s/alerting/v3/history", accountID), alertHistoryFilter) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []NotificationHistory{}, ResultInfo{}, err + } + var r NotificationHistoryResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []NotificationHistory{}, ResultInfo{}, err + } + return r.Result, r.ResultInfo, nil +} + +// unmarshal will unmarshal bytes and return a SaveResponse. +func unmarshalNotificationSaveResponse(res []byte) (SaveResponse, error) { + var r SaveResponse + err := json.Unmarshal(res, &r) + if err != nil { + return r, err + } + return r, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/observatory.go b/vendor/github.com/cloudflare/cloudflare-go/observatory.go new file mode 100644 index 0000000..d1f7d08 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/observatory.go @@ -0,0 +1,401 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/goccy/go-json" + "github.com/google/go-querystring/query" +) + +var ( + ErrMissingObservatoryUrl = errors.New("missing required page url") + ErrMissingObservatoryTestID = errors.New("missing required test id") +) + +// ObservatoryPage describes all the tests for a web page. +type ObservatoryPage struct { + URL string `json:"url"` + Region labeledRegion `json:"region"` + ScheduleFrequency string `json:"scheduleFrequency"` + Tests []ObservatoryPageTest `json:"tests"` +} + +// ObservatoryPageTest describes a single test for a web page. +type ObservatoryPageTest struct { + ID string `json:"id"` + Date *time.Time `json:"date"` + URL string `json:"url"` + Region labeledRegion `json:"region"` + ScheduleFrequency *string `json:"scheduleFrequency"` + MobileReport ObservatoryLighthouseReport `json:"mobileReport"` + DesktopReport ObservatoryLighthouseReport `json:"desktopReport"` +} + +// labeledRegion describes a region the test was run in. +type labeledRegion struct { + Value string `json:"value"` + Label string `json:"label"` +} + +// ObservatorySchedule describe a test schedule. +type ObservatorySchedule struct { + URL string `json:"url"` + Region string `json:"region"` + Frequency string `json:"frequency"` +} + +// ObservatoryLighthouseReport describes the web vital metrics result. 
+type ObservatoryLighthouseReport struct { + PerformanceScore int `json:"performanceScore"` + State string `json:"state"` + DeviceType string `json:"deviceType"` + // TTFB is time to first byte + TTFB int `json:"ttfb"` + // FCP is first contentful paint + FCP int `json:"fcp"` + // LCP is largest contentful pain + LCP int `json:"lcp"` + // TTI is time to interactive + TTI int `json:"tti"` + // TBT is total blocking time + TBT int `json:"tbt"` + // SI is speed index + SI int `json:"si"` + // CLS is cumulative layout shift + CLS float64 `json:"cls"` + Error *lighthouseError `json:"error,omitempty"` +} + +// lighthouseError describes the test error. +type lighthouseError struct { + Code string `json:"code"` + Detail string `json:"detail"` + FinalDisplayedURL string `json:"finalDisplayedUrl"` +} + +// ObservatoryPageTrend describes the web vital metrics trend. +type ObservatoryPageTrend struct { + PerformanceScore []*int `json:"performanceScore"` + TTFB []*int `json:"ttfb"` + FCP []*int `json:"fcp"` + LCP []*int `json:"lcp"` + TTI []*int `json:"tti"` + TBT []*int `json:"tbt"` + SI []*int `json:"si"` + CLS []*float64 `json:"cls"` +} + +type ListObservatoryPagesParams struct { +} + +// ObservatoryPagesResponse is the API response, containing a list of ObservatoryPage. +type ObservatoryPagesResponse struct { + Response + Result []ObservatoryPage `json:"result"` +} + +// ListObservatoryPages returns a list of pages which have been tested. +// +// API reference: https://api.cloudflare.com/#speed-list-pages +func (api *API) ListObservatoryPages(ctx context.Context, rc *ResourceContainer, params ListObservatoryPagesParams) ([]ObservatoryPage, error) { + uri := fmt.Sprintf("/zones/%s/speed_api/pages", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r ObservatoryPagesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +type GetObservatoryPageTrendParams struct { + URL string `url:"-"` + Region string `url:"region"` + DeviceType string `url:"deviceType"` + Start *time.Time `url:"start"` + End *time.Time `url:"end,omitempty"` + Timezone string `url:"tz"` + Metrics []string `url:"metrics"` +} + +type ObservatoryPageTrendResponse struct { + Response + Result ObservatoryPageTrend `json:"result"` +} + +// GetObservatoryPageTrend returns a the trend of web vital metrics for a page in a specific region. 
+// +// API reference: https://api.cloudflare.com/#speed-list-page-trend +func (api *API) GetObservatoryPageTrend(ctx context.Context, rc *ResourceContainer, params GetObservatoryPageTrendParams) (*ObservatoryPageTrend, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + // cannot use buildURI because params.URL contains "/" that should be encoded and buildURI will double encode %2F into %252F + v, _ := query.Values(params) + uri := fmt.Sprintf("/zones/%s/speed_api/pages/%s/trend?%s", rc.Identifier, url.PathEscape(params.URL), v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r ObservatoryPageTrendResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +var listObservatoryPageTestDefaultPageSize = 20 + +type ListObservatoryPageTestParams struct { + URL string `url:"-"` + Region string `url:"region"` + ResultInfo +} + +type ObservatoryPageTestsResponse struct { + Response + Result []ObservatoryPageTest `json:"result"` + ResultInfo `json:"result_info"` +} + +// ListObservatoryPageTests returns a list of tests for a page in a specific region. +// +// API reference: https://api.cloudflare.com/#speed-list-test-history +func (api *API) ListObservatoryPageTests(ctx context.Context, rc *ResourceContainer, params ListObservatoryPageTestParams) ([]ObservatoryPageTest, *ResultInfo, error) { + if params.URL == "" { + return nil, nil, ErrMissingObservatoryUrl + } + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = listObservatoryPageTestDefaultPageSize + } + if params.Page < 1 { + params.Page = 1 + } + var tests []ObservatoryPageTest + var lastResultInfo ResultInfo + for { + // cannot use buildURI because params.URL contains "/" that should be encoded and buildURI will double encode %2F into %252F + v, _ := query.Values(params) + uri := fmt.Sprintf("/zones/%s/speed_api/pages/%s/tests?%s", rc.Identifier, url.PathEscape(params.URL), v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, nil, err + } + var r ObservatoryPageTestsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + tests = append(tests, r.Result...) + lastResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + return tests, &lastResultInfo, nil +} + +type CreateObservatoryPageTestParams struct { + URL string + Settings CreateObservatoryPageTestSettings +} +type CreateObservatoryPageTestSettings struct { + Region string `json:"region"` +} + +type ObservatoryPageTestResponse struct { + Response + Result ObservatoryPageTest `json:"result"` +} + +// CreateObservatoryPageTest starts a test for a page in a specific region. 
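+//
+// An illustrative sketch, assuming the ZoneIdentifier helper from resource.go, an
+// authenticated *API client "api", a placeholder zoneID, and an illustrative region
+// value:
+//
+//	test, err := api.CreateObservatoryPageTest(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.CreateObservatoryPageTestParams{
+//		URL: "example.com/home",
+//		Settings: cloudflare.CreateObservatoryPageTestSettings{
+//			Region: "us-central1", // placeholder region
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("started test", test.ID)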
+// +// API reference: https://api.cloudflare.com/#speed-create-test +func (api *API) CreateObservatoryPageTest(ctx context.Context, rc *ResourceContainer, params CreateObservatoryPageTestParams) (*ObservatoryPageTest, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + uri := fmt.Sprintf("/zones/%s/speed_api/pages/%s/tests", rc.Identifier, url.PathEscape(params.URL)) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Settings) + if err != nil { + return nil, err + } + var r ObservatoryPageTestResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type DeleteObservatoryPageTestsParams struct { + URL string `url:"-"` + Region string `url:"region"` +} + +type ObservatoryCountResponse struct { + Response + Result struct { + Count int `json:"count"` + } `json:"result"` +} + +// DeleteObservatoryPageTests deletes all tests for a page in a specific region. +// +// API reference: https://api.cloudflare.com/#speed-delete-tests +func (api *API) DeleteObservatoryPageTests(ctx context.Context, rc *ResourceContainer, params DeleteObservatoryPageTestsParams) (*int, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + // cannot use buildURI because params.URL contains "/" that should be encoded and buildURI will double encode %2F into %252F + v, _ := query.Values(params) + uri := fmt.Sprintf("/zones/%s/speed_api/pages/%s/tests?%s", rc.Identifier, url.PathEscape(params.URL), v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + var r ObservatoryCountResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result.Count, nil +} + +type GetObservatoryPageTestParams struct { + URL string + TestID string +} + +// GetObservatoryPageTest returns a specific test for a page. +// +// API reference: https://api.cloudflare.com/#speed-get-test +func (api *API) GetObservatoryPageTest(ctx context.Context, rc *ResourceContainer, params GetObservatoryPageTestParams) (*ObservatoryPageTest, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + if params.TestID == "" { + return nil, ErrMissingObservatoryTestID + } + uri := fmt.Sprintf("/zones/%s/speed_api/pages/%s/tests/%s", rc.Identifier, url.PathEscape(params.URL), params.TestID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r ObservatoryPageTestResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type CreateObservatoryScheduledPageTestParams struct { + URL string `url:"-" json:"-"` + Region string `url:"region" json:"-"` + Frequency string `url:"frequency" json:"-"` +} + +type ObservatoryScheduledPageTest struct { + Schedule ObservatorySchedule `json:"schedule"` + Test ObservatoryPageTest `json:"test"` +} + +type CreateObservatoryScheduledPageTestResponse struct { + Response + Result ObservatoryScheduledPageTest `json:"result"` +} + +// CreateObservatoryScheduledPageTest creates a scheduled test for a page in a specific region. 
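+//
+// An illustrative sketch; the region and frequency values are placeholders:
+//
+//	sched, err := api.CreateObservatoryScheduledPageTest(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.CreateObservatoryScheduledPageTestParams{
+//		URL:       "example.com/home",
+//		Region:    "us-central1", // placeholder
+//		Frequency: "DAILY",       // placeholder
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("schedule frequency:", sched.Schedule.Frequency)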
+// +// API reference: https://api.cloudflare.com/#speed-create-scheduled-test +func (api *API) CreateObservatoryScheduledPageTest(ctx context.Context, rc *ResourceContainer, params CreateObservatoryScheduledPageTestParams) (*ObservatoryScheduledPageTest, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + // cannot use buildURI because params.URL contains "/" that should be encoded and buildURI will double encode %2F into %252F + v, _ := query.Values(params) + uri := fmt.Sprintf("/zones/%s/speed_api/schedule/%s?%s", rc.Identifier, url.PathEscape(params.URL), v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return nil, err + } + var r CreateObservatoryScheduledPageTestResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type GetObservatoryScheduledPageTestParams struct { + URL string `url:"-"` + Region string `url:"region"` +} + +type ObservatoryScheduleResponse struct { + Response + Result ObservatorySchedule `json:"result"` +} + +// GetObservatoryScheduledPageTest returns the test schedule for a page in a specific region. +// +// API reference: https://api.cloudflare.com/#speed-get-scheduled-test +func (api *API) GetObservatoryScheduledPageTest(ctx context.Context, rc *ResourceContainer, params GetObservatoryScheduledPageTestParams) (*ObservatorySchedule, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + // cannot use buildURI because params.URL contains "/" that should be encoded and buildURI will double encode %2F into %252F + v, _ := query.Values(params) + uri := fmt.Sprintf("/zones/%s/speed_api/schedule/%s?%s", rc.Identifier, url.PathEscape(params.URL), v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r ObservatoryScheduleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type DeleteObservatoryScheduledPageTestParams struct { + URL string `url:"-"` + Region string `url:"region"` +} + +// DeleteObservatoryScheduledPageTest deletes the test schedule for a page in a specific region. 
+// +// API reference: https://api.cloudflare.com/#speed-delete-scheduled-test +func (api *API) DeleteObservatoryScheduledPageTest(ctx context.Context, rc *ResourceContainer, params DeleteObservatoryScheduledPageTestParams) (*int, error) { + if params.URL == "" { + return nil, ErrMissingObservatoryUrl + } + // cannot use buildURI because params.URL contains "/" that should be encoded and buildURI will double encode %2F into %252F + v, _ := query.Values(params) + uri := fmt.Sprintf("/zones/%s/speed_api/schedule/%s?%s", rc.Identifier, url.PathEscape(params.URL), v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + var r ObservatoryCountResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result.Count, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/options.go b/vendor/github.com/cloudflare/cloudflare-go/options.go new file mode 100644 index 0000000..0638a2d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/options.go @@ -0,0 +1,106 @@ +package cloudflare + +import ( + "net/http" + "time" + + "golang.org/x/time/rate" +) + +// Option is a functional option for configuring the API client. +type Option func(*API) error + +// HTTPClient accepts a custom *http.Client for making API calls. +func HTTPClient(client *http.Client) Option { + return func(api *API) error { + api.httpClient = client + return nil + } +} + +// Headers allows you to set custom HTTP headers when making API calls (e.g. for +// satisfying HTTP proxies, or for debugging). +func Headers(headers http.Header) Option { + return func(api *API) error { + api.headers = headers + return nil + } +} + +// UsingRateLimit applies a non-default rate limit to client API requests +// If not specified the default of 4rps will be applied. +func UsingRateLimit(rps float64) Option { + return func(api *API) error { + // because ratelimiter doesnt do any windowing + // setting burst makes it difficult to enforce a fixed rate + // so setting it equal to 1 this effectively disables bursting + // this doesn't check for sensible values, ultimately the api will enforce that the value is ok + api.rateLimiter = rate.NewLimiter(rate.Limit(rps), 1) + return nil + } +} + +// UsingRetryPolicy applies a non-default number of retries and min/max retry delays +// This will be used when the client exponentially backs off after errored requests. +func UsingRetryPolicy(maxRetries int, minRetryDelaySecs int, maxRetryDelaySecs int) Option { + // seconds is very granular for a minimum delay - but this is only in case of failure + return func(api *API) error { + api.retryPolicy = RetryPolicy{ + MaxRetries: maxRetries, + MinRetryDelay: time.Duration(minRetryDelaySecs) * time.Second, + MaxRetryDelay: time.Duration(maxRetryDelaySecs) * time.Second, + } + return nil + } +} + +// UsingLogger can be set if you want to get log output from this API instance +// By default no log output is emitted. +func UsingLogger(logger Logger) Option { + return func(api *API) error { + api.logger = logger + return nil + } +} + +// UserAgent can be set if you want to send a software name and version for HTTP access logs. +// It is recommended to set it in order to help future Customer Support diagnostics +// and prevent collateral damage by sharing generic User-Agent string with abusive users. +// E.g. "my-software/1.2.3". By default generic Go User-Agent is used. 
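+//
+// Options such as UserAgent are passed to the client constructor. An illustrative
+// sketch, assuming the New constructor from cloudflare.go (not shown in this hunk)
+// and placeholder credentials in apiKey and apiEmail:
+//
+//	api, err := cloudflare.New(apiKey, apiEmail,
+//		cloudflare.UserAgent("my-tool/1.2.3"),
+//		cloudflare.UsingRetryPolicy(3, 1, 10), // up to 3 retries, 1-10s backoff
+//	)
+//	if err != nil {
+//		return err
+//	}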
+func UserAgent(userAgent string) Option { + return func(api *API) error { + api.UserAgent = userAgent + return nil + } +} + +// BaseURL allows you to override the default HTTP base URL used for API calls. +func BaseURL(baseURL string) Option { + return func(api *API) error { + api.BaseURL = baseURL + return nil + } +} + +func Debug(debug bool) Option { + return func(api *API) error { + api.Debug = debug + return nil + } +} + +// parseOptions parses the supplied options functions and returns a configured +// *API instance. +func (api *API) parseOptions(opts ...Option) error { + // Range over each options function and apply it to our API type to + // configure it. Options functions are applied in order, with any + // conflicting options overriding earlier calls. + for _, option := range opts { + err := option(api) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/origin_ca.go b/vendor/github.com/cloudflare/cloudflare-go/origin_ca.go new file mode 100644 index 0000000..ff8589a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/origin_ca.go @@ -0,0 +1,233 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// OriginCACertificate represents a Cloudflare-issued certificate. +// +// API reference: https://api.cloudflare.com/#cloudflare-ca +type OriginCACertificate struct { + ID string `json:"id"` + Certificate string `json:"certificate"` + Hostnames []string `json:"hostnames"` + ExpiresOn time.Time `json:"expires_on"` + RequestType string `json:"request_type"` + RequestValidity int `json:"requested_validity"` + RevokedAt time.Time `json:"revoked_at,omitempty"` + CSR string `json:"csr"` +} + +type CreateOriginCertificateParams struct { + ID string `json:"id"` + Certificate string `json:"certificate"` + Hostnames []string `json:"hostnames"` + ExpiresOn time.Time `json:"expires_on"` + RequestType string `json:"request_type"` + RequestValidity int `json:"requested_validity"` + RevokedAt time.Time `json:"revoked_at,omitempty"` + CSR string `json:"csr"` +} + +// UnmarshalJSON handles custom parsing from an API response to an OriginCACertificate +// http://choly.ca/post/go-json-marshalling/ +func (c *OriginCACertificate) UnmarshalJSON(data []byte) error { + type Alias OriginCACertificate + + aux := &struct { + ExpiresOn string `json:"expires_on"` + *Alias + }{ + Alias: (*Alias)(c), + } + + var err error + + if err = json.Unmarshal(data, &aux); err != nil { + return err + } + + // This format comes from time.Time.String() source + c.ExpiresOn, err = time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", aux.ExpiresOn) + + if err != nil { + c.ExpiresOn, err = time.Parse(time.RFC3339, aux.ExpiresOn) + } + + if err != nil { + return err + } + + return nil +} + +// ListOriginCertificatesParams represents the parameters used to list +// Cloudflare-issued certificates. +type ListOriginCertificatesParams struct { + ZoneID string `url:"zone_id,omitempty"` +} + +// OriginCACertificateID represents the ID of the revoked certificate from the Revoke Certificate endpoint. +type OriginCACertificateID struct { + ID string `json:"id"` +} + +// originCACertificateResponse represents the response from the Create Certificate and the Certificate Details endpoints. +type originCACertificateResponse struct { + Response + Result OriginCACertificate `json:"result"` +} + +// originCACertificateResponseList represents the response from the List Certificates endpoint. 
+type originCACertificateResponseList struct { + Response + Result []OriginCACertificate `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// originCACertificateResponseRevoke represents the response from the Revoke Certificate endpoint. +type originCACertificateResponseRevoke struct { + Response + Result OriginCACertificateID `json:"result"` +} + +// CreateOriginCACertificate creates a Cloudflare-signed certificate. +// +// API reference: https://api.cloudflare.com/#cloudflare-ca-create-certificate +func (api *API) CreateOriginCACertificate(ctx context.Context, params CreateOriginCertificateParams) (*OriginCACertificate, error) { + res, err := api.makeRequestContext(ctx, http.MethodPost, "/certificates", params) + if err != nil { + return &OriginCACertificate{}, err + } + + var originResponse *originCACertificateResponse + + err = json.Unmarshal(res, &originResponse) + + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !originResponse.Success { + return nil, errors.New(errRequestNotSuccessful) + } + + return &originResponse.Result, nil +} + +// ListOriginCACertificates lists all Cloudflare-issued certificates. +// +// API reference: https://api.cloudflare.com/#cloudflare-ca-list-certificates +func (api *API) ListOriginCACertificates(ctx context.Context, params ListOriginCertificatesParams) ([]OriginCACertificate, error) { + uri := buildURI("/certificates", params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + if err != nil { + return nil, err + } + + var originResponse *originCACertificateResponseList + + err = json.Unmarshal(res, &originResponse) + + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !originResponse.Success { + return nil, errors.New(errRequestNotSuccessful) + } + + return originResponse.Result, nil +} + +// GetOriginCACertificate returns the details for a Cloudflare-issued +// certificate. +// +// API reference: https://api.cloudflare.com/#cloudflare-ca-certificate-details +func (api *API) GetOriginCACertificate(ctx context.Context, certificateID string) (*OriginCACertificate, error) { + uri := fmt.Sprintf("/certificates/%s", certificateID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + if err != nil { + return nil, err + } + + var originResponse *originCACertificateResponse + + err = json.Unmarshal(res, &originResponse) + + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !originResponse.Success { + return nil, errors.New(errRequestNotSuccessful) + } + + return &originResponse.Result, nil +} + +// RevokeOriginCACertificate revokes a created certificate for a zone. +// +// API reference: https://api.cloudflare.com/#cloudflare-ca-revoke-certificate +func (api *API) RevokeOriginCACertificate(ctx context.Context, certificateID string) (*OriginCACertificateID, error) { + uri := fmt.Sprintf("/certificates/%s", certificateID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return nil, err + } + + var originResponse *originCACertificateResponseRevoke + + err = json.Unmarshal(res, &originResponse) + + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !originResponse.Success { + return nil, errors.New(errRequestNotSuccessful) + } + + return &originResponse.Result, nil +} + +// Gets the Cloudflare Origin CA Root Certificate for a given algorithm in PEM format. +// Algorithm must be one of ['ecc', 'rsa']. 
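+//
+// Minimal usage sketch (the return value is raw PEM bytes; writing it to disk is one option):
+//
+//	pem, err := cloudflare.GetOriginCARootCertificate("ecc")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = os.WriteFile("origin_ca_ecc_root.pem", pem, 0o600)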
+func GetOriginCARootCertificate(algorithm string) ([]byte, error) { + var url string + switch algorithm { + case "ecc": + url = originCARootCertEccURL + case "rsa": + url = originCARootCertRsaURL + default: + return nil, fmt.Errorf("invalid algorithm: must be one of ['ecc', 'rsa']") + } + + resp, err := http.Get(url) //nolint:gosec + if err != nil { + return nil, fmt.Errorf("HTTP request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, errors.New(errRequestNotSuccessful) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("Response body could not be read: %w", err) + } + + return body, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/page_rules.go b/vendor/github.com/cloudflare/cloudflare-go/page_rules.go new file mode 100644 index 0000000..c4fa391 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/page_rules.go @@ -0,0 +1,240 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// PageRuleTarget is the target to evaluate on a request. +// +// Currently Target must always be "url" and Operator must be "matches". Value +// is the URL pattern to match against. +type PageRuleTarget struct { + Target string `json:"target"` + Constraint struct { + Operator string `json:"operator"` + Value string `json:"value"` + } `json:"constraint"` +} + +/* +PageRuleAction is the action to take when the target is matched. + +Valid IDs are: + + always_online + always_use_https + automatic_https_rewrites + browser_cache_ttl + browser_check + bypass_cache_on_cookie + cache_by_device_type + cache_deception_armor + cache_level + cache_key_fields + cache_on_cookie + disable_apps + disable_performance + disable_railgun + disable_security + edge_cache_ttl + email_obfuscation + explicit_cache_control + forwarding_url + host_header_override + ip_geolocation + minify + mirage + opportunistic_encryption + origin_error_page_pass_thru + polish + resolve_override + respect_strong_etag + response_buffering + rocket_loader + security_level + server_side_exclude + sort_query_string_for_cache + ssl + true_client_ip_header + waf +*/ +type PageRuleAction struct { + ID string `json:"id"` + Value interface{} `json:"value"` +} + +// PageRuleActions maps API action IDs to human-readable strings. 
+var PageRuleActions = map[string]string{ + "always_online": "Always Online", // Value of type string + "always_use_https": "Always Use HTTPS", // Value of type interface{} + "automatic_https_rewrites": "Automatic HTTPS Rewrites", // Value of type string + "browser_cache_ttl": "Browser Cache TTL", // Value of type int + "browser_check": "Browser Integrity Check", // Value of type string + "bypass_cache_on_cookie": "Bypass Cache on Cookie", // Value of type string + "cache_by_device_type": "Cache By Device Type", // Value of type string + "cache_deception_armor": "Cache Deception Armor", // Value of type string + "cache_level": "Cache Level", // Value of type string + "cache_key_fields": "Custom Cache Key", // Value of type map[string]interface + "cache_on_cookie": "Cache On Cookie", // Value of type string + "disable_apps": "Disable Apps", // Value of type interface{} + "disable_performance": "Disable Performance", // Value of type interface{} + "disable_railgun": "Disable Railgun", // Value of type string + "disable_security": "Disable Security", // Value of type interface{} + "edge_cache_ttl": "Edge Cache TTL", // Value of type int + "email_obfuscation": "Email Obfuscation", // Value of type string + "explicit_cache_control": "Origin Cache Control", // Value of type string + "forwarding_url": "Forwarding URL", // Value of type map[string]interface + "host_header_override": "Host Header Override", // Value of type string + "ip_geolocation": "IP Geolocation Header", // Value of type string + "minify": "Minify", // Value of type map[string]interface + "mirage": "Mirage", // Value of type string + "opportunistic_encryption": "Opportunistic Encryption", // Value of type string + "origin_error_page_pass_thru": "Origin Error Page Pass-thru", // Value of type string + "polish": "Polish", // Value of type string + "resolve_override": "Resolve Override", // Value of type string + "respect_strong_etag": "Respect Strong ETags", // Value of type string + "response_buffering": "Response Buffering", // Value of type string + "rocket_loader": "Rocker Loader", // Value of type string + "security_level": "Security Level", // Value of type string + "server_side_exclude": "Server Side Excludes", // Value of type string + "sort_query_string_for_cache": "Query String Sort", // Value of type string + "ssl": "SSL", // Value of type string + "true_client_ip_header": "True Client IP Header", // Value of type string + "waf": "Web Application Firewall", // Value of type string +} + +// PageRule describes a Page Rule. +type PageRule struct { + ID string `json:"id,omitempty"` + Targets []PageRuleTarget `json:"targets"` + Actions []PageRuleAction `json:"actions"` + Priority int `json:"priority"` + Status string `json:"status"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + CreatedOn time.Time `json:"created_on,omitempty"` +} + +// PageRuleDetailResponse is the API response, containing a single PageRule. +type PageRuleDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result PageRule `json:"result"` +} + +// PageRulesResponse is the API response, containing an array of PageRules. +type PageRulesResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result []PageRule `json:"result"` +} + +// CreatePageRule creates a new Page Rule for a zone. 
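+//
+// Sketch of building a rule from the types above; the zone ID and URL pattern are placeholders.
+//
+//	target := cloudflare.PageRuleTarget{Target: "url"}
+//	target.Constraint.Operator = "matches"
+//	target.Constraint.Value = "www.example.com/*"
+//
+//	rule := cloudflare.PageRule{
+//		Targets:  []cloudflare.PageRuleTarget{target},
+//		Actions:  []cloudflare.PageRuleAction{{ID: "always_use_https"}},
+//		Priority: 1,
+//		Status:   "active",
+//	}
+//	created, err := api.CreatePageRule(ctx, zoneID, rule)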
+// +// API reference: https://api.cloudflare.com/#page-rules-for-a-zone-create-a-page-rule +func (api *API) CreatePageRule(ctx context.Context, zoneID string, rule PageRule) (*PageRule, error) { + uri := fmt.Sprintf("/zones/%s/pagerules", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, rule) + if err != nil { + return nil, err + } + var r PageRuleDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +// ListPageRules returns all Page Rules for a zone. +// +// API reference: https://api.cloudflare.com/#page-rules-for-a-zone-list-page-rules +func (api *API) ListPageRules(ctx context.Context, zoneID string) ([]PageRule, error) { + uri := fmt.Sprintf("/zones/%s/pagerules", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PageRule{}, err + } + var r PageRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []PageRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// PageRule fetches detail about one Page Rule for a zone. +// +// API reference: https://api.cloudflare.com/#page-rules-for-a-zone-page-rule-details +func (api *API) PageRule(ctx context.Context, zoneID, ruleID string) (PageRule, error) { + uri := fmt.Sprintf("/zones/%s/pagerules/%s", zoneID, ruleID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PageRule{}, err + } + var r PageRuleDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PageRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ChangePageRule lets you change individual settings for a Page Rule. This is +// in contrast to UpdatePageRule which replaces the entire Page Rule. +// +// API reference: https://api.cloudflare.com/#page-rules-for-a-zone-change-a-page-rule +func (api *API) ChangePageRule(ctx context.Context, zoneID, ruleID string, rule PageRule) error { + uri := fmt.Sprintf("/zones/%s/pagerules/%s", zoneID, ruleID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, rule) + if err != nil { + return err + } + var r PageRuleDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// UpdatePageRule lets you replace a Page Rule. This is in contrast to +// ChangePageRule which lets you change individual settings. +// +// API reference: https://api.cloudflare.com/#page-rules-for-a-zone-update-a-page-rule +func (api *API) UpdatePageRule(ctx context.Context, zoneID, ruleID string, rule PageRule) error { + uri := fmt.Sprintf("/zones/%s/pagerules/%s", zoneID, ruleID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, rule) + if err != nil { + return err + } + var r PageRuleDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// DeletePageRule deletes a Page Rule for a zone. 
+// +// API reference: https://api.cloudflare.com/#page-rules-for-a-zone-delete-a-page-rule +func (api *API) DeletePageRule(ctx context.Context, zoneID, ruleID string) error { + uri := fmt.Sprintf("/zones/%s/pagerules/%s", zoneID, ruleID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r PageRuleDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/page_shield.go b/vendor/github.com/cloudflare/cloudflare-go/page_shield.go new file mode 100644 index 0000000..d05c949 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/page_shield.go @@ -0,0 +1,76 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// PageShield represents the page shield object minus any timestamps. +type PageShield struct { + Enabled *bool `json:"enabled,omitempty"` + UseCloudflareReportingEndpoint *bool `json:"use_cloudflare_reporting_endpoint,omitempty"` + UseConnectionURLPath *bool `json:"use_connection_url_path,omitempty"` +} + +type UpdatePageShieldSettingsParams struct { + Enabled *bool `json:"enabled,omitempty"` + UseCloudflareReportingEndpoint *bool `json:"use_cloudflare_reporting_endpoint,omitempty"` + UseConnectionURLPath *bool `json:"use_connection_url_path,omitempty"` +} + +// PageShieldSettings represents the page shield settings for a zone. +type PageShieldSettings struct { + PageShield + UpdatedAt string `json:"updated_at"` +} + +// PageShieldSettingsResponse represents the response from the page shield settings endpoint. +type PageShieldSettingsResponse struct { + PageShield PageShieldSettings `json:"result"` + Response +} + +type GetPageShieldSettingsParams struct{} + +// GetPageShieldSettings returns the page shield settings for a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-get-page-shield-settings +func (api *API) GetPageShieldSettings(ctx context.Context, rc *ResourceContainer, params GetPageShieldSettingsParams) (*PageShieldSettingsResponse, error) { + uri := fmt.Sprintf("/zones/%s/page_shield", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var psResponse PageShieldSettingsResponse + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse, nil +} + +// UpdatePageShieldSettings updates the page shield settings for a zone. 
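+//
+// Hedged usage sketch; ZoneIdentifier is assumed from resource.go elsewhere in this patch.
+//
+//	enabled := true
+//	settings, err := api.UpdatePageShieldSettings(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.UpdatePageShieldSettingsParams{Enabled: &enabled})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = settings.PageShield.Enabled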
+// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-update-page-shield-settings +func (api *API) UpdatePageShieldSettings(ctx context.Context, rc *ResourceContainer, params UpdatePageShieldSettingsParams) (*PageShieldSettingsResponse, error) { + uri := fmt.Sprintf("/zones/%s/page_shield", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return nil, err + } + + var psResponse PageShieldSettingsResponse + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/page_shield_connections.go b/vendor/github.com/cloudflare/cloudflare-go/page_shield_connections.go new file mode 100644 index 0000000..9046512 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/page_shield_connections.go @@ -0,0 +1,88 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// ListPageShieldConnectionsParams represents parameters for a page shield connection request. +type ListPageShieldConnectionsParams struct { + Direction string `url:"direction"` + ExcludeCdnCgi *bool `url:"exclude_cdn_cgi,omitempty"` + ExcludeUrls string `url:"exclude_urls"` + Export string `url:"export"` + Hosts string `url:"hosts"` + OrderBy string `url:"order_by"` + Page string `url:"page"` + PageURL string `url:"page_url"` + PerPage int `url:"per_page"` + PrioritizeMalicious *bool `url:"prioritize_malicious,omitempty"` + Status string `url:"status"` + URLs string `url:"urls"` +} + +// PageShieldConnection represents a page shield connection. +type PageShieldConnection struct { + AddedAt string `json:"added_at"` + DomainReportedMalicious *bool `json:"domain_reported_malicious,omitempty"` + FirstPageURL string `json:"first_page_url"` + FirstSeenAt string `json:"first_seen_at"` + Host string `json:"host"` + ID string `json:"id"` + LastSeenAt string `json:"last_seen_at"` + PageURLs []string `json:"page_urls"` + URL string `json:"url"` + URLContainsCdnCgiPath *bool `json:"url_contains_cdn_cgi_path,omitempty"` +} + +// ListPageShieldConnectionsResponse represents the response from the list page shield connections endpoint. +type ListPageShieldConnectionsResponse struct { + Result []PageShieldConnection `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// ListPageShieldConnections lists all page shield connections for a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-list-page-shield-connections +func (api *API) ListPageShieldConnections(ctx context.Context, rc *ResourceContainer, params ListPageShieldConnectionsParams) ([]PageShieldConnection, ResultInfo, error) { + path := fmt.Sprintf("/zones/%s/page_shield/connections", rc.Identifier) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, ResultInfo{}, err + } + + var psResponse ListPageShieldConnectionsResponse + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return psResponse.Result, psResponse.ResultInfo, nil +} + +// GetPageShieldConnection gets a page shield connection for a zone. 
+// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-get-a-page-shield-connection +func (api *API) GetPageShieldConnection(ctx context.Context, rc *ResourceContainer, connectionID string) (*PageShieldConnection, error) { + path := fmt.Sprintf("/zones/%s/page_shield/connections/%s", rc.Identifier, connectionID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + var psResponse PageShieldConnection + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/page_shield_policies.go b/vendor/github.com/cloudflare/cloudflare-go/page_shield_policies.go new file mode 100644 index 0000000..29ce64a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/page_shield_policies.go @@ -0,0 +1,140 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// PageShieldPolicy represents a page shield policy. +type PageShieldPolicy struct { + Action string `json:"action"` + Description string `json:"description"` + Enabled *bool `json:"enabled,omitempty"` + Expression string `json:"expression"` + ID string `json:"id"` + Value string `json:"value"` +} + +type CreatePageShieldPolicyParams struct { + Action string `json:"action"` + Description string `json:"description"` + Enabled *bool `json:"enabled,omitempty"` + Expression string `json:"expression"` + ID string `json:"id"` + Value string `json:"value"` +} + +type UpdatePageShieldPolicyParams struct { + Action string `json:"action"` + Description string `json:"description"` + Enabled *bool `json:"enabled,omitempty"` + Expression string `json:"expression"` + ID string `json:"id"` + Value string `json:"value"` +} + +// ListPageShieldPoliciesResponse represents the response from the list page shield policies endpoint. +type ListPageShieldPoliciesResponse struct { + Result []PageShieldPolicy `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type ListPageShieldPoliciesParams struct{} + +// ListPageShieldPolicies lists all page shield policies for a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-list-page-shield-policies +func (api *API) ListPageShieldPolicies(ctx context.Context, rc *ResourceContainer, params ListPageShieldPoliciesParams) ([]PageShieldPolicy, ResultInfo, error) { + path := fmt.Sprintf("/zones/%s/page_shield/policies", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, ResultInfo{}, err + } + + var psResponse ListPageShieldPoliciesResponse + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return psResponse.Result, psResponse.ResultInfo, nil +} + +// CreatePageShieldPolicy creates a page shield policy for a zone. 
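+//
+// Rough sketch only: the action, expression, and value below are illustrative placeholders,
+// not values confirmed by this patch, and ZoneIdentifier is assumed from resource.go.
+//
+//	enabled := true
+//	policy, err := api.CreatePageShieldPolicy(ctx, cloudflare.ZoneIdentifier(zoneID),
+//		cloudflare.CreatePageShieldPolicyParams{
+//			Action:      "allow",
+//			Description: "only allow scripts served from our own hosts",
+//			Enabled:     &enabled,
+//			Expression:  "ends_with(http.request.uri.path, \"/checkout\")",
+//			Value:       "script-src 'self'",
+//		})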
+// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-create-page-shield-policy +func (api *API) CreatePageShieldPolicy(ctx context.Context, rc *ResourceContainer, params CreatePageShieldPolicyParams) (*PageShieldPolicy, error) { + path := fmt.Sprintf("/zones/%s/page_shield/policies", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, path, params) + if err != nil { + return nil, err + } + + var psResponse PageShieldPolicy + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse, nil +} + +// DeletePageShieldPolicy deletes a page shield policy for a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-delete-page-shield-policy +func (api *API) DeletePageShieldPolicy(ctx context.Context, rc *ResourceContainer, policyID string) error { + path := fmt.Sprintf("/zones/%s/page_shield/policies/%s", rc.Identifier, policyID) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, path, nil) + if err != nil { + return err + } + + return nil +} + +// GetPageShieldPolicy gets a page shield policy for a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-get-page-shield-policy +func (api *API) GetPageShieldPolicy(ctx context.Context, rc *ResourceContainer, policyID string) (*PageShieldPolicy, error) { + path := fmt.Sprintf("/zones/%s/page_shield/policies/%s", rc.Identifier, policyID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + var psResponse PageShieldPolicy + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse, nil +} + +// UpdatePageShieldPolicy updates a page shield policy for a zone. +// +// API documentation: https://developers.cloudflare.com/api/operations/page-shield-update-page-shield-policy +func (api *API) UpdatePageShieldPolicy(ctx context.Context, rc *ResourceContainer, params UpdatePageShieldPolicyParams) (*PageShieldPolicy, error) { + path := fmt.Sprintf("/zones/%s/page_shield/policies/%s", rc.Identifier, params.ID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, path, params) + if err != nil { + return nil, err + } + + var psResponse PageShieldPolicy + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/page_shield_scripts.go b/vendor/github.com/cloudflare/cloudflare-go/page_shield_scripts.go new file mode 100644 index 0000000..09559b5 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/page_shield_scripts.go @@ -0,0 +1,107 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// PageShieldScript represents a Page Shield script. 
+type PageShieldScript struct { + AddedAt string `json:"added_at"` + DomainReportedMalicious *bool `json:"domain_reported_malicious,omitempty"` + FetchedAt string `json:"fetched_at"` + FirstPageURL string `json:"first_page_url"` + FirstSeenAt string `json:"first_seen_at"` + Hash string `json:"hash"` + Host string `json:"host"` + ID string `json:"id"` + JSIntegrityScore int `json:"js_integrity_score"` + LastSeenAt string `json:"last_seen_at"` + PageURLs []string `json:"page_urls"` + URL string `json:"url"` + URLContainsCdnCgiPath *bool `json:"url_contains_cdn_cgi_path,omitempty"` +} + +// ListPageShieldScriptsParams represents a PageShield Script request parameters. +// +// API reference: https://developers.cloudflare.com/api/operations/page-shield-list-page-shield-scripts#Query-Parameters +type ListPageShieldScriptsParams struct { + Direction string `url:"direction"` + ExcludeCdnCgi *bool `url:"exclude_cdn_cgi,omitempty"` + ExcludeDuplicates *bool `url:"exclude_duplicates,omitempty"` + ExcludeUrls string `url:"exclude_urls"` + Export string `url:"export"` + Hosts string `url:"hosts"` + OrderBy string `url:"order_by"` + Page string `url:"page"` + PageURL string `url:"page_url"` + PerPage int `url:"per_page"` + PrioritizeMalicious *bool `url:"prioritize_malicious,omitempty"` + Status string `url:"status"` + URLs string `url:"urls"` +} + +// PageShieldScriptsResponse represents the response from the PageShield Script API. +type PageShieldScriptsResponse struct { + Results []PageShieldScript `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// PageShieldScriptResponse represents the response from the PageShield Script API. +type PageShieldScriptResponse struct { + Result PageShieldScript `json:"result"` + Versions []PageShieldScriptVersion `json:"versions"` +} + +// PageShieldScriptVersion represents a Page Shield script version. +type PageShieldScriptVersion struct { + FetchedAt string `json:"fetched_at"` + Hash string `json:"hash"` + JSIntegrityScore int `json:"js_integrity_score"` +} + +// ListPageShieldScripts returns a list of PageShield Scripts. +// +// API reference: https://developers.cloudflare.com/api/operations/page-shield-list-page-shield-scripts +func (api *API) ListPageShieldScripts(ctx context.Context, rc *ResourceContainer, params ListPageShieldScriptsParams) ([]PageShieldScript, ResultInfo, error) { + path := fmt.Sprintf("/zones/%s/page_shield/scripts", rc.Identifier) + + uri := buildURI(path, params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, ResultInfo{}, err + } + + var psResponse PageShieldScriptsResponse + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return psResponse.Results, psResponse.ResultInfo, nil +} + +// GetPageShieldScript returns a PageShield Script. 
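+//
+// Usage sketch; scriptID is a placeholder and ZoneIdentifier is assumed from resource.go.
+//
+//	script, versions, err := api.GetPageShieldScript(ctx, cloudflare.ZoneIdentifier(zoneID), scriptID)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = script.URL
+//	_ = len(versions)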
+// +// API reference: https://developers.cloudflare.com/api/operations/page-shield-get-a-page-shield-script +func (api *API) GetPageShieldScript(ctx context.Context, rc *ResourceContainer, scriptID string) (*PageShieldScript, []PageShieldScriptVersion, error) { + path := fmt.Sprintf("/zones/%s/page_shield/scripts/%s", rc.Identifier, scriptID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + var psResponse PageShieldScriptResponse + err = json.Unmarshal(res, &psResponse) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &psResponse.Result, psResponse.Versions, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/pages_deployment.go b/vendor/github.com/cloudflare/cloudflare-go/pages_deployment.go new file mode 100644 index 0000000..b81a2c1 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/pages_deployment.go @@ -0,0 +1,343 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// SizeOptions can be passed to a list request to configure size and cursor location. +// These values will be defaulted if omitted. +// +// This should be swapped to ResultInfoCursors once the types are corrected. +type SizeOptions struct { + Size int `json:"size,omitempty" url:"size,omitempty"` + Before *int `json:"before,omitempty" url:"before,omitempty"` + After *int `json:"after,omitempty" url:"after,omitempty"` +} + +// PagesDeploymentStageLogs represents the logs for a Pages deployment stage. +type PagesDeploymentStageLogs struct { + Name string `json:"name"` + StartedOn *time.Time `json:"started_on"` + EndedOn *time.Time `json:"ended_on"` + Status string `json:"status"` + Start int `json:"start"` + End int `json:"end"` + Total int `json:"total"` + Data []PagesDeploymentStageLogEntry `json:"data"` +} + +// PagesDeploymentStageLogEntry represents a single log entry in a Pages deployment stage. +type PagesDeploymentStageLogEntry struct { + ID int `json:"id"` + Timestamp *time.Time `json:"timestamp"` + Message string `json:"message"` +} + +// PagesDeploymentLogs represents the logs for a Pages deployment. +type PagesDeploymentLogs struct { + Total int `json:"total"` + IncludesContainerLogs bool `json:"includes_container_logs"` + Data []PagesDeploymentLogEntry `json:"data"` +} + +// PagesDeploymentLogEntry represents a single log entry in a Pages deployment. 
+type PagesDeploymentLogEntry struct { + Timestamp *time.Time `json:"ts"` + Line string `json:"line"` +} + +type pagesDeploymentListResponse struct { + Response + Result []PagesProjectDeployment `json:"result"` + ResultInfo `json:"result_info"` +} + +type pagesDeploymentResponse struct { + Response + Result PagesProjectDeployment `json:"result"` +} + +type pagesDeploymentLogsResponse struct { + Response + Result PagesDeploymentLogs `json:"result"` + ResultInfo `json:"result_info"` +} + +type ListPagesDeploymentsParams struct { + ProjectName string `url:"-"` + + ResultInfo +} + +type GetPagesDeploymentInfoParams struct { + ProjectName string + DeploymentID string +} + +type GetPagesDeploymentStageLogsParams struct { + ProjectName string + DeploymentID string + StageName string + + SizeOptions +} + +type GetPagesDeploymentLogsParams struct { + ProjectName string + DeploymentID string + + SizeOptions +} + +type DeletePagesDeploymentParams struct { + ProjectName string `url:"-"` + DeploymentID string `url:"-"` + Force bool `url:"force,omitempty"` +} + +type CreatePagesDeploymentParams struct { + ProjectName string `json:"-"` + Branch string `json:"branch,omitempty"` +} + +type RetryPagesDeploymentParams struct { + ProjectName string + DeploymentID string +} + +type RollbackPagesDeploymentParams struct { + ProjectName string + DeploymentID string +} + +var ( + ErrMissingProjectName = errors.New("required missing project name") + ErrMissingDeploymentID = errors.New("required missing deployment ID") +) + +// ListPagesDeployments returns all deployments for a Pages project. +// +// API reference: https://api.cloudflare.com/#pages-deployment-get-deployments +func (api *API) ListPagesDeployments(ctx context.Context, rc *ResourceContainer, params ListPagesDeploymentsParams) ([]PagesProjectDeployment, *ResultInfo, error) { + if rc.Identifier == "" { + return []PagesProjectDeployment{}, &ResultInfo{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return []PagesProjectDeployment{}, &ResultInfo{}, ErrMissingProjectName + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var deployments []PagesProjectDeployment + var r pagesDeploymentListResponse + + for { + uri := buildURI(fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments", rc.Identifier, params.ProjectName), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PagesProjectDeployment{}, &ResultInfo{}, err + } + err = json.Unmarshal(res, &r) + if err != nil { + return []PagesProjectDeployment{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + deployments = append(deployments, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.Done() || !autoPaginate { + break + } + } + return deployments, &r.ResultInfo, nil +} + +// GetPagesDeploymentInfo returns a deployment for a Pages project. 
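+//
+// Usage sketch; AccountIdentifier is assumed from resource.go elsewhere in this patch.
+//
+//	deployment, err := api.GetPagesDeploymentInfo(ctx, cloudflare.AccountIdentifier(accountID),
+//		"my-project", deploymentID)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = deployment.URL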
+// +// API reference: https://api.cloudflare.com/#pages-deployment-get-deployment-info +func (api *API) GetPagesDeploymentInfo(ctx context.Context, rc *ResourceContainer, projectName, deploymentID string) (PagesProjectDeployment, error) { + if rc.Identifier == "" { + return PagesProjectDeployment{}, ErrMissingAccountID + } + + if projectName == "" { + return PagesProjectDeployment{}, ErrMissingProjectName + } + + if deploymentID == "" { + return PagesProjectDeployment{}, ErrMissingDeploymentID + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments/%s", rc.Identifier, projectName, deploymentID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PagesProjectDeployment{}, err + } + var r pagesDeploymentResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProjectDeployment{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// GetPagesDeploymentLogs returns the logs for a Pages deployment. +// +// API reference: https://api.cloudflare.com/#pages-deployment-get-deployment-logs +func (api *API) GetPagesDeploymentLogs(ctx context.Context, rc *ResourceContainer, params GetPagesDeploymentLogsParams) (PagesDeploymentLogs, error) { + if rc.Identifier == "" { + return PagesDeploymentLogs{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return PagesDeploymentLogs{}, ErrMissingProjectName + } + + if params.DeploymentID == "" { + return PagesDeploymentLogs{}, ErrMissingDeploymentID + } + + uri := buildURI( + fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments/%s/history/logs", rc.Identifier, params.ProjectName, params.DeploymentID), + params.SizeOptions, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PagesDeploymentLogs{}, err + } + var r pagesDeploymentLogsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesDeploymentLogs{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeletePagesDeployment deletes a Pages deployment. +// +// API reference: https://api.cloudflare.com/#pages-deployment-delete-deployment +func (api *API) DeletePagesDeployment(ctx context.Context, rc *ResourceContainer, params DeletePagesDeploymentParams) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if params.ProjectName == "" { + return ErrMissingProjectName + } + + if params.DeploymentID == "" { + return ErrMissingDeploymentID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments/%s", rc.Identifier, params.ProjectName, params.DeploymentID), params) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + return nil +} + +// CreatePagesDeployment creates a Pages production deployment. 
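+//
+// Usage sketch; AccountIdentifier is assumed from resource.go and the branch name is a placeholder.
+//
+//	deployment, err := api.CreatePagesDeployment(ctx, cloudflare.AccountIdentifier(accountID),
+//		cloudflare.CreatePagesDeploymentParams{
+//			ProjectName: "my-project",
+//			Branch:      "main",
+//		})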
+// +// API reference: https://api.cloudflare.com/#pages-deployment-create-deployment +func (api *API) CreatePagesDeployment(ctx context.Context, rc *ResourceContainer, params CreatePagesDeploymentParams) (PagesProjectDeployment, error) { + if rc.Identifier == "" { + return PagesProjectDeployment{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return PagesProjectDeployment{}, ErrMissingProjectName + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments", rc.Identifier, params.ProjectName) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return PagesProjectDeployment{}, err + } + var r pagesDeploymentResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProjectDeployment{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// RetryPagesDeployment retries a specific Pages deployment. +// +// API reference: https://api.cloudflare.com/#pages-deployment-retry-deployment +func (api *API) RetryPagesDeployment(ctx context.Context, rc *ResourceContainer, projectName, deploymentID string) (PagesProjectDeployment, error) { + if rc.Identifier == "" { + return PagesProjectDeployment{}, ErrMissingAccountID + } + + if projectName == "" { + return PagesProjectDeployment{}, ErrMissingProjectName + } + + if deploymentID == "" { + return PagesProjectDeployment{}, ErrMissingDeploymentID + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments/%s/retry", rc.Identifier, projectName, deploymentID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return PagesProjectDeployment{}, err + } + var r pagesDeploymentResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProjectDeployment{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// RollbackPagesDeployment rollbacks the Pages production deployment to a previous production deployment. +// +// API reference: https://api.cloudflare.com/#pages-deployment-rollback-deployment +func (api *API) RollbackPagesDeployment(ctx context.Context, rc *ResourceContainer, projectName, deploymentID string) (PagesProjectDeployment, error) { + if rc.Identifier == "" { + return PagesProjectDeployment{}, ErrMissingAccountID + } + + if projectName == "" { + return PagesProjectDeployment{}, ErrMissingProjectName + } + + if deploymentID == "" { + return PagesProjectDeployment{}, ErrMissingDeploymentID + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/deployments/%s/rollback", rc.Identifier, projectName, deploymentID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return PagesProjectDeployment{}, err + } + var r pagesDeploymentResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProjectDeployment{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/pages_domain.go b/vendor/github.com/cloudflare/cloudflare-go/pages_domain.go new file mode 100644 index 0000000..a6fa8ac --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/pages_domain.go @@ -0,0 +1,194 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// PagesDomain represents a pages domain. 
+type PagesDomain struct { + ID string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + VerificationData VerificationData `json:"verification_data"` + ValidationData ValidationData `json:"validation_data"` + ZoneTag string `json:"zone_tag"` + CreatedOn *time.Time `json:"created_on"` +} + +// VerificationData represents verification data for a domain. +type VerificationData struct { + Status string `json:"status"` +} + +// ValidationData represents validation data for a domain. +type ValidationData struct { + Status string `json:"status"` + Method string `json:"method"` +} + +// PagesDomainsParameters represents parameters for a pages domains request. +type PagesDomainsParameters struct { + AccountID string + ProjectName string +} + +// PagesDomainsResponse represents an API response for a pages domains request. +type PagesDomainsResponse struct { + Response + Result []PagesDomain `json:"result,omitempty"` +} + +// PagesDomainParameters represents parameters for a pages domain request. +type PagesDomainParameters struct { + AccountID string `json:"-"` + ProjectName string `json:"-"` + DomainName string `json:"name,omitempty"` +} + +// PagesDomainResponse represents an API response for a pages domain request. +type PagesDomainResponse struct { + Response + Result PagesDomain `json:"result,omitempty"` +} + +// GetPagesDomains gets all domains for a pages project. +// +// API Reference: https://api.cloudflare.com/#pages-domains-get-domains +func (api *API) GetPagesDomains(ctx context.Context, params PagesDomainsParameters) ([]PagesDomain, error) { + if params.AccountID == "" { + return []PagesDomain{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return []PagesDomain{}, ErrMissingProjectName + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/domains", params.AccountID, params.ProjectName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PagesDomain{}, err + } + + var pageDomainResponse PagesDomainsResponse + if err := json.Unmarshal(res, &pageDomainResponse); err != nil { + return []PagesDomain{}, err + } + return pageDomainResponse.Result, nil +} + +// GetPagesDomain gets a single domain. +// +// API Reference: https://api.cloudflare.com/#pages-domains-get-domains +func (api *API) GetPagesDomain(ctx context.Context, params PagesDomainParameters) (PagesDomain, error) { + if params.AccountID == "" { + return PagesDomain{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return PagesDomain{}, ErrMissingProjectName + } + + if params.DomainName == "" { + return PagesDomain{}, ErrMissingDomain + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/domains/%s", params.AccountID, params.ProjectName, params.DomainName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PagesDomain{}, err + } + + var pagesDomainResponse PagesDomainResponse + if err := json.Unmarshal(res, &pagesDomainResponse); err != nil { + return PagesDomain{}, err + } + return pagesDomainResponse.Result, nil +} + +// PagesPatchDomain retries the validation status of a single domain. 
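+//
+// Usage sketch built from PagesDomainParameters above; all values are placeholders.
+//
+//	domain, err := api.PagesPatchDomain(ctx, cloudflare.PagesDomainParameters{
+//		AccountID:   accountID,
+//		ProjectName: "my-project",
+//		DomainName:  "www.example.com",
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = domain.Status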
+// +// API Reference: https://api.cloudflare.com/#pages-domains-patch-domain +func (api *API) PagesPatchDomain(ctx context.Context, params PagesDomainParameters) (PagesDomain, error) { + if params.AccountID == "" { + return PagesDomain{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return PagesDomain{}, ErrMissingProjectName + } + + if params.DomainName == "" { + return PagesDomain{}, ErrMissingDomain + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/domains/%s", params.AccountID, params.ProjectName, params.DomainName) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, nil) + if err != nil { + return PagesDomain{}, err + } + + var pagesDomainResponse PagesDomainResponse + if err := json.Unmarshal(res, &pagesDomainResponse); err != nil { + return PagesDomain{}, err + } + return pagesDomainResponse.Result, nil +} + +// PagesAddDomain adds a domain to a pages project. +// +// API Reference: https://api.cloudflare.com/#pages-domains-add-domain +func (api *API) PagesAddDomain(ctx context.Context, params PagesDomainParameters) (PagesDomain, error) { + if params.AccountID == "" { + return PagesDomain{}, ErrMissingAccountID + } + + if params.ProjectName == "" { + return PagesDomain{}, ErrMissingProjectName + } + + if params.DomainName == "" { + return PagesDomain{}, ErrMissingDomain + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/domains", params.AccountID, params.ProjectName) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return PagesDomain{}, err + } + + var pagesDomainResponse PagesDomainResponse + if err := json.Unmarshal(res, &pagesDomainResponse); err != nil { + return PagesDomain{}, err + } + return pagesDomainResponse.Result, nil +} + +// PagesDeleteDomain removes a domain from a pages project. +// +// API Reference: https://api.cloudflare.com/#pages-domains-delete-domain +func (api *API) PagesDeleteDomain(ctx context.Context, params PagesDomainParameters) error { + if params.AccountID == "" { + return ErrMissingAccountID + } + + if params.ProjectName == "" { + return ErrMissingProjectName + } + + if params.DomainName == "" { + return ErrMissingDomain + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s/domains/%s", params.AccountID, params.ProjectName, params.DomainName) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, params) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/pages_project.go b/vendor/github.com/cloudflare/cloudflare-go/pages_project.go new file mode 100644 index 0000000..2028461 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/pages_project.go @@ -0,0 +1,333 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type PagesPreviewDeploymentSetting string + +const ( + PagesPreviewAllBranches PagesPreviewDeploymentSetting = "all" + PagesPreviewNoBranches PagesPreviewDeploymentSetting = "none" + PagesPreviewCustomBranches PagesPreviewDeploymentSetting = "custom" +) + +// PagesProject represents a Pages project. 
+type PagesProject struct { + Name string `json:"name,omitempty"` + ID string `json:"id"` + CreatedOn *time.Time `json:"created_on"` + SubDomain string `json:"subdomain"` + Domains []string `json:"domains,omitempty"` + Source *PagesProjectSource `json:"source,omitempty"` + BuildConfig PagesProjectBuildConfig `json:"build_config"` + DeploymentConfigs PagesProjectDeploymentConfigs `json:"deployment_configs"` + LatestDeployment PagesProjectDeployment `json:"latest_deployment"` + CanonicalDeployment PagesProjectDeployment `json:"canonical_deployment"` + ProductionBranch string `json:"production_branch,omitempty"` +} + +// PagesProjectSource represents the configuration of a Pages project source. +type PagesProjectSource struct { + Type string `json:"type"` + Config *PagesProjectSourceConfig `json:"config"` +} + +// PagesProjectSourceConfig represents the properties use to configure a Pages project source. +type PagesProjectSourceConfig struct { + Owner string `json:"owner"` + RepoName string `json:"repo_name"` + ProductionBranch string `json:"production_branch"` + PRCommentsEnabled bool `json:"pr_comments_enabled"` + DeploymentsEnabled bool `json:"deployments_enabled"` + ProductionDeploymentsEnabled bool `json:"production_deployments_enabled"` + PreviewDeploymentSetting PagesPreviewDeploymentSetting `json:"preview_deployment_setting"` + PreviewBranchIncludes []string `json:"preview_branch_includes"` + PreviewBranchExcludes []string `json:"preview_branch_excludes"` +} + +// PagesProjectBuildConfig represents the configuration of a Pages project build process. +type PagesProjectBuildConfig struct { + BuildCaching *bool `json:"build_caching,omitempty"` + BuildCommand string `json:"build_command"` + DestinationDir string `json:"destination_dir"` + RootDir string `json:"root_dir"` + WebAnalyticsTag string `json:"web_analytics_tag"` + WebAnalyticsToken string `json:"web_analytics_token"` +} + +// PagesProjectDeploymentConfigs represents the configuration for deployments in a Pages project. +type PagesProjectDeploymentConfigs struct { + Preview PagesProjectDeploymentConfigEnvironment `json:"preview"` + Production PagesProjectDeploymentConfigEnvironment `json:"production"` +} + +// PagesProjectDeploymentConfigEnvironment represents the configuration for preview or production deploys. +type PagesProjectDeploymentConfigEnvironment struct { + EnvVars EnvironmentVariableMap `json:"env_vars,omitempty"` + KvNamespaces NamespaceBindingMap `json:"kv_namespaces,omitempty"` + DoNamespaces NamespaceBindingMap `json:"durable_object_namespaces,omitempty"` + D1Databases D1BindingMap `json:"d1_databases,omitempty"` + R2Bindings R2BindingMap `json:"r2_buckets,omitempty"` + ServiceBindings ServiceBindingMap `json:"services,omitempty"` + CompatibilityDate string `json:"compatibility_date,omitempty"` + CompatibilityFlags []string `json:"compatibility_flags,omitempty"` + FailOpen bool `json:"fail_open"` + AlwaysUseLatestCompatibilityDate bool `json:"always_use_latest_compatibility_date"` + UsageModel UsageModel `json:"usage_model,omitempty"` + Placement *Placement `json:"placement,omitempty"` +} + +// PagesProjectDeployment represents a deployment to a Pages project. 
+type PagesProjectDeployment struct { + ID string `json:"id"` + ShortID string `json:"short_id"` + ProjectID string `json:"project_id"` + ProjectName string `json:"project_name"` + Environment string `json:"environment"` + URL string `json:"url"` + CreatedOn *time.Time `json:"created_on"` + ModifiedOn *time.Time `json:"modified_on"` + Aliases []string `json:"aliases,omitempty"` + LatestStage PagesProjectDeploymentStage `json:"latest_stage"` + EnvVars EnvironmentVariableMap `json:"env_vars"` + KvNamespaces NamespaceBindingMap `json:"kv_namespaces,omitempty"` + DoNamespaces NamespaceBindingMap `json:"durable_object_namespaces,omitempty"` + D1Databases D1BindingMap `json:"d1_databases,omitempty"` + R2Bindings R2BindingMap `json:"r2_buckets,omitempty"` + ServiceBindings ServiceBindingMap `json:"services,omitempty"` + Placement *Placement `json:"placement,omitempty"` + DeploymentTrigger PagesProjectDeploymentTrigger `json:"deployment_trigger"` + Stages []PagesProjectDeploymentStage `json:"stages"` + BuildConfig PagesProjectBuildConfig `json:"build_config"` + Source PagesProjectSource `json:"source"` + CompatibilityDate string `json:"compatibility_date,omitempty"` + CompatibilityFlags []string `json:"compatibility_flags,omitempty"` + UsageModel UsageModel `json:"usage_model,omitempty"` + IsSkipped bool `json:"is_skipped"` + ProductionBranch string `json:"production_branch,omitempty"` +} + +// PagesProjectDeploymentStage represents an individual stage in a Pages project deployment. +type PagesProjectDeploymentStage struct { + Name string `json:"name"` + StartedOn *time.Time `json:"started_on,omitempty"` + EndedOn *time.Time `json:"ended_on,omitempty"` + Status string `json:"status"` +} + +// PagesProjectDeploymentTrigger represents information about what caused a deployment. +type PagesProjectDeploymentTrigger struct { + Type string `json:"type"` + Metadata *PagesProjectDeploymentTriggerMetadata `json:"metadata"` +} + +// PagesProjectDeploymentTriggerMetadata represents additional information about the cause of a deployment. +type PagesProjectDeploymentTriggerMetadata struct { + Branch string `json:"branch"` + CommitHash string `json:"commit_hash"` + CommitMessage string `json:"commit_message"` +} + +type pagesProjectResponse struct { + Response + Result PagesProject `json:"result"` +} + +type pagesProjectListResponse struct { + Response + Result []PagesProject `json:"result"` + ResultInfo `json:"result_info"` +} + +type EnvironmentVariableMap map[string]*EnvironmentVariable + +// PagesProjectDeploymentVar represents a deployment environment variable. 
+type EnvironmentVariable struct { + Value string `json:"value"` + Type EnvVarType `json:"type"` +} + +type EnvVarType string + +const ( + PlainText EnvVarType = "plain_text" + SecretText EnvVarType = "secret_text" +) + +type NamespaceBindingMap map[string]*NamespaceBindingValue + +type NamespaceBindingValue struct { + Value string `json:"namespace_id"` +} + +type R2BindingMap map[string]*R2BindingValue + +type R2BindingValue struct { + Name string `json:"name"` +} + +type D1BindingMap map[string]*D1Binding + +type D1Binding struct { + ID string `json:"id"` +} + +type ServiceBindingMap map[string]*ServiceBinding + +type ServiceBinding struct { + Service string `json:"service"` + Environment string `json:"environment"` +} + +type UsageModel string + +const ( + Bundled UsageModel = "bundled" + Unbound UsageModel = "unbound" + Standard UsageModel = "standard" +) + +type ListPagesProjectsParams struct { + PaginationOptions +} + +type CreatePagesProjectParams struct { + Name string `json:"name,omitempty"` + SubDomain string `json:"subdomain"` + Domains []string `json:"domains,omitempty"` + Source *PagesProjectSource `json:"source,omitempty"` + BuildConfig PagesProjectBuildConfig `json:"build_config"` + DeploymentConfigs PagesProjectDeploymentConfigs `json:"deployment_configs"` + LatestDeployment PagesProjectDeployment `json:"latest_deployment"` + CanonicalDeployment PagesProjectDeployment `json:"canonical_deployment"` + ProductionBranch string `json:"production_branch,omitempty"` +} + +type UpdatePagesProjectParams struct { + // `ID` is used for addressing the resource via the UI or a stable + // anchor whereas `Name` is used for updating the value. + ID string `json:"-"` + Name string `json:"name,omitempty"` + SubDomain string `json:"subdomain"` + Domains []string `json:"domains,omitempty"` + Source *PagesProjectSource `json:"source,omitempty"` + BuildConfig PagesProjectBuildConfig `json:"build_config"` + DeploymentConfigs PagesProjectDeploymentConfigs `json:"deployment_configs"` + LatestDeployment PagesProjectDeployment `json:"latest_deployment"` + CanonicalDeployment PagesProjectDeployment `json:"canonical_deployment"` + ProductionBranch string `json:"production_branch,omitempty"` +} + +// ListPagesProjects returns all Pages projects for an account. +// +// API reference: https://api.cloudflare.com/#pages-project-get-projects +func (api *API) ListPagesProjects(ctx context.Context, rc *ResourceContainer, params ListPagesProjectsParams) ([]PagesProject, ResultInfo, error) { + if rc.Identifier == "" { + return []PagesProject{}, ResultInfo{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/pages/projects", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PagesProject{}, ResultInfo{}, err + } + var r pagesProjectListResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []PagesProject{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, r.ResultInfo, nil +} + +// GetPagesProject returns a single Pages project by name. 
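+//
+// Usage sketch; AccountIdentifier is assumed from resource.go elsewhere in this patch.
+//
+//	project, err := api.GetPagesProject(ctx, cloudflare.AccountIdentifier(accountID), "my-project")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = project.SubDomain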
+// +// API reference: https://api.cloudflare.com/#pages-project-get-project +func (api *API) GetPagesProject(ctx context.Context, rc *ResourceContainer, projectName string) (PagesProject, error) { + if rc.Identifier == "" { + return PagesProject{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s", rc.Identifier, projectName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PagesProject{}, err + } + var r pagesProjectResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProject{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreatePagesProject creates a new Pages project in an account. +// +// API reference: https://api.cloudflare.com/#pages-project-create-project +func (api *API) CreatePagesProject(ctx context.Context, rc *ResourceContainer, params CreatePagesProjectParams) (PagesProject, error) { + if rc.Identifier == "" { + return PagesProject{}, ErrMissingAccountID + } + uri := fmt.Sprintf("/accounts/%s/pages/projects", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return PagesProject{}, err + } + var r pagesProjectResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProject{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdatePagesProject updates an existing Pages project. +// +// API reference: https://api.cloudflare.com/#pages-project-update-project +func (api *API) UpdatePagesProject(ctx context.Context, rc *ResourceContainer, params UpdatePagesProjectParams) (PagesProject, error) { + if rc.Identifier == "" { + return PagesProject{}, ErrMissingAccountID + } + + if params.ID == "" { + return PagesProject{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return PagesProject{}, err + } + var r pagesProjectResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PagesProject{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeletePagesProject deletes a Pages project by name. +// +// API reference: https://api.cloudflare.com/#pages-project-delete-project +func (api *API) DeletePagesProject(ctx context.Context, rc *ResourceContainer, projectName string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + uri := fmt.Sprintf("/accounts/%s/pages/projects/%s", rc.Identifier, projectName) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r pagesProjectResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/pagination.go b/vendor/github.com/cloudflare/cloudflare-go/pagination.go new file mode 100644 index 0000000..db06ff8 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/pagination.go @@ -0,0 +1,59 @@ +package cloudflare + +import ( + "math" +) + +// Look first for total_pages, but if total_count and per_page are set then use that to get page count. +func (p ResultInfo) getTotalPages() int { + totalPages := p.TotalPages + if totalPages == 0 && p.Total > 0 && p.PerPage > 0 { + totalPages = int(math.Ceil(float64(p.Total) / float64(p.PerPage))) + } + return totalPages +} + +// Done returns true for the last page and false otherwise. 
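+//
+// A typical auto-pagination loop over one of the list calls looks roughly
+// like the sketch below (ListPagesProjects is only an example; any method
+// returning a ResultInfo works the same way, and `api`, `ctx` and `rc` are
+// assumed to exist):
+//
+//    var all []cloudflare.PagesProject
+//    params := cloudflare.ListPagesProjectsParams{}
+//    for {
+//        page, info, err := api.ListPagesProjects(ctx, rc, params)
+//        if err != nil {
+//            break // handle the error
+//        }
+//        all = append(all, page...)
+//        info = info.Next()
+//        if info.Done() {
+//            break
+//        }
+//        params.Page = info.Page
+//    }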
+func (p ResultInfo) Done() bool { + // A little hacky but if the response body is lacking a defined `ResultInfo` + // object the page will be 1 however the counts will be empty so if we have + // that response, we just assume this is the only page. + totalPages := p.getTotalPages() + if p.Page == 1 && totalPages == 0 { + return true + } + + return p.Page > 1 && p.Page > totalPages +} + +// Next advances the page of a paginated API response, but does not fetch the +// next page of results. +func (p ResultInfo) Next() ResultInfo { + // A little hacky but if the response body is lacking a defined `ResultInfo` + // object the page will be 1 however the counts will be empty so if we have + // that response, we just assume this is the only page. + totalPages := p.getTotalPages() + if p.Page == 1 && totalPages == 0 { + return p + } + + // This shouldn't happen normally however, when it does just return the + // current page. + if p.Page > totalPages { + return p + } + + p.Page++ + return p +} + +// HasMorePages returns whether there is another page of results after the +// current one. +func (p ResultInfo) HasMorePages() bool { + totalPages := p.getTotalPages() + if totalPages == 0 { + return false + } + + return p.Page >= 1 && p.Page < totalPages +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/per_hostname_tls_settings.go b/vendor/github.com/cloudflare/cloudflare-go/per_hostname_tls_settings.go new file mode 100644 index 0000000..19c1598 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/per_hostname_tls_settings.go @@ -0,0 +1,251 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// HostnameTLSSetting represents the metadata for a user-created tls setting. +type HostnameTLSSetting struct { + Hostname string `json:"hostname"` + Value string `json:"value"` + Status string `json:"status"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// HostnameTLSSettingResponse represents the response from the PUT and DELETE endpoints for per-hostname tls settings. +type HostnameTLSSettingResponse struct { + Response + Result HostnameTLSSetting `json:"result"` +} + +// HostnameTLSSettingsResponse represents the response from the retrieval endpoint for per-hostname tls settings. +type HostnameTLSSettingsResponse struct { + Response + Result []HostnameTLSSetting `json:"result"` + ResultInfo `json:"result_info"` +} + +// ListHostnameTLSSettingsParams represents the data related to per-hostname tls settings being retrieved. +type ListHostnameTLSSettingsParams struct { + Setting string `json:"-" url:"setting,omitempty"` + PaginationOptions `json:"-"` + Limit int `json:"-" url:"limit,omitempty"` + Offset int `json:"-" url:"offset,omitempty"` + Hostname []string `json:"-" url:"hostname,omitempty"` +} + +// UpdateHostnameTLSSettingParams represents the data related to the per-hostname tls setting being updated. +type UpdateHostnameTLSSettingParams struct { + Setting string + Hostname string + Value string `json:"value"` +} + +// DeleteHostnameTLSSettingParams represents the data related to the per-hostname tls setting being deleted. +type DeleteHostnameTLSSettingParams struct { + Setting string + Hostname string +} + +var ( + ErrMissingHostnameTLSSettingName = errors.New("tls setting name required but missing") +) + +// ListHostnameTLSSettings returns a list of all user-created tls setting values for the specified setting and hostnames. 
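+//
+// A minimal usage sketch; the zone ID is a placeholder and "min_tls_version"
+// is just one example of a per-hostname setting name:
+//
+//    settings, _, err := api.ListHostnameTLSSettings(ctx, cloudflare.ZoneIdentifier("<zone-id>"),
+//        cloudflare.ListHostnameTLSSettingsParams{Setting: "min_tls_version"})
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, s := range settings {
+//        fmt.Println(s.Hostname, s.Value)
+//    }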
+// +// API reference: https://developers.cloudflare.com/api/operations/per-hostname-tls-settings-list +func (api *API) ListHostnameTLSSettings(ctx context.Context, rc *ResourceContainer, params ListHostnameTLSSettingsParams) ([]HostnameTLSSetting, ResultInfo, error) { + if rc.Identifier == "" { + return []HostnameTLSSetting{}, ResultInfo{}, ErrMissingZoneID + } + if params.Setting == "" { + return []HostnameTLSSetting{}, ResultInfo{}, ErrMissingHostnameTLSSettingName + } + + uri := buildURI(fmt.Sprintf("/zones/%s/hostnames/settings/%s", rc.Identifier, params.Setting), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []HostnameTLSSetting{}, ResultInfo{}, err + } + var r HostnameTLSSettingsResponse + if err := json.Unmarshal(res, &r); err != nil { + return []HostnameTLSSetting{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, r.ResultInfo, err +} + +// UpdateHostnameTLSSetting will update the per-hostname tls setting for the specified hostname. +// +// API reference: https://developers.cloudflare.com/api/operations/per-hostname-tls-settings-put +func (api *API) UpdateHostnameTLSSetting(ctx context.Context, rc *ResourceContainer, params UpdateHostnameTLSSettingParams) (HostnameTLSSetting, error) { + if rc.Identifier == "" { + return HostnameTLSSetting{}, ErrMissingZoneID + } + if params.Setting == "" { + return HostnameTLSSetting{}, ErrMissingHostnameTLSSettingName + } + if params.Hostname == "" { + return HostnameTLSSetting{}, ErrMissingHostname + } + + uri := fmt.Sprintf("/zones/%s/hostnames/settings/%s/%s", rc.Identifier, params.Setting, params.Hostname) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return HostnameTLSSetting{}, err + } + var r HostnameTLSSettingResponse + if err := json.Unmarshal(res, &r); err != nil { + return HostnameTLSSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteHostnameTLSSetting will delete the specified per-hostname tls setting. +// +// API reference: https://developers.cloudflare.com/api/operations/per-hostname-tls-settings-delete +func (api *API) DeleteHostnameTLSSetting(ctx context.Context, rc *ResourceContainer, params DeleteHostnameTLSSettingParams) (HostnameTLSSetting, error) { + if rc.Identifier == "" { + return HostnameTLSSetting{}, ErrMissingZoneID + } + if params.Setting == "" { + return HostnameTLSSetting{}, ErrMissingHostnameTLSSettingName + } + if params.Hostname == "" { + return HostnameTLSSetting{}, ErrMissingHostname + } + + uri := fmt.Sprintf("/zones/%s/hostnames/settings/%s/%s", rc.Identifier, params.Setting, params.Hostname) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return HostnameTLSSetting{}, err + } + var r HostnameTLSSettingResponse + if err := json.Unmarshal(res, &r); err != nil { + return HostnameTLSSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// HostnameTLSSettingCiphers represents the metadata for a user-created ciphers tls setting. +type HostnameTLSSettingCiphers struct { + Hostname string `json:"hostname"` + Value []string `json:"value"` + Status string `json:"status"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// HostnameTLSSettingCiphersResponse represents the response from the PUT and DELETE endpoints for per-hostname ciphers tls settings. 
+type HostnameTLSSettingCiphersResponse struct { + Response + Result HostnameTLSSettingCiphers `json:"result"` +} + +// HostnameTLSSettingsCiphersResponse represents the response from the retrieval endpoint for per-hostname ciphers tls settings. +type HostnameTLSSettingsCiphersResponse struct { + Response + Result []HostnameTLSSettingCiphers `json:"result"` + ResultInfo `json:"result_info"` +} + +// ListHostnameTLSSettingsCiphersParams represents the data related to per-hostname ciphers tls settings being retrieved. +type ListHostnameTLSSettingsCiphersParams struct { + PaginationOptions + Limit int `json:"-" url:"limit,omitempty"` + Offset int `json:"-" url:"offset,omitempty"` + Hostname []string `json:"-" url:"hostname,omitempty"` +} + +// UpdateHostnameTLSSettingCiphersParams represents the data related to the per-hostname ciphers tls setting being updated. +type UpdateHostnameTLSSettingCiphersParams struct { + Hostname string + Value []string `json:"value"` +} + +// DeleteHostnameTLSSettingCiphersParams represents the data related to the per-hostname ciphers tls setting being deleted. +type DeleteHostnameTLSSettingCiphersParams struct { + Hostname string +} + +// ListHostnameTLSSettingsCiphers returns a list of all user-created tls setting ciphers values for the specified setting and hostnames. +// Ciphers functions are separate due to the API returning a list of strings as the value, rather than a string (as is the case for the other tls settings). +// +// API reference: https://developers.cloudflare.com/api/operations/per-hostname-tls-settings-list +func (api *API) ListHostnameTLSSettingsCiphers(ctx context.Context, rc *ResourceContainer, params ListHostnameTLSSettingsCiphersParams) ([]HostnameTLSSettingCiphers, ResultInfo, error) { + if rc.Identifier == "" { + return []HostnameTLSSettingCiphers{}, ResultInfo{}, ErrMissingZoneID + } + + uri := buildURI(fmt.Sprintf("/zones/%s/hostnames/settings/ciphers", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []HostnameTLSSettingCiphers{}, ResultInfo{}, err + } + var r HostnameTLSSettingsCiphersResponse + if err := json.Unmarshal(res, &r); err != nil { + return []HostnameTLSSettingCiphers{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, r.ResultInfo, err +} + +// UpdateHostnameTLSSettingCiphers will update the per-hostname ciphers tls setting for the specified hostname. +// Ciphers functions are separate due to the API returning a list of strings as the value, rather than a string (as is the case for the other tls settings). 
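+//
+// A minimal usage sketch; the zone ID, hostname and cipher name below are
+// placeholders:
+//
+//    setting, err := api.UpdateHostnameTLSSettingCiphers(ctx, cloudflare.ZoneIdentifier("<zone-id>"),
+//        cloudflare.UpdateHostnameTLSSettingCiphersParams{
+//            Hostname: "app.example.com",
+//            Value:    []string{"ECDHE-ECDSA-AES128-GCM-SHA256"},
+//        })
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(setting.Status)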
+// +// API reference: https://developers.cloudflare.com/api/operations/per-hostname-tls-settings-put +func (api *API) UpdateHostnameTLSSettingCiphers(ctx context.Context, rc *ResourceContainer, params UpdateHostnameTLSSettingCiphersParams) (HostnameTLSSettingCiphers, error) { + if rc.Identifier == "" { + return HostnameTLSSettingCiphers{}, ErrMissingZoneID + } + if params.Hostname == "" { + return HostnameTLSSettingCiphers{}, ErrMissingHostname + } + + uri := fmt.Sprintf("/zones/%s/hostnames/settings/ciphers/%s", rc.Identifier, params.Hostname) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return HostnameTLSSettingCiphers{}, err + } + var r HostnameTLSSettingCiphersResponse + if err := json.Unmarshal(res, &r); err != nil { + return HostnameTLSSettingCiphers{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteHostnameTLSSettingCiphers will delete the specified per-hostname ciphers tls setting value. +// Ciphers functions are separate due to the API returning a list of strings as the value, rather than a string (as is the case for the other tls settings). +// +// API reference: https://developers.cloudflare.com/api/operations/per-hostname-tls-settings-delete +func (api *API) DeleteHostnameTLSSettingCiphers(ctx context.Context, rc *ResourceContainer, params DeleteHostnameTLSSettingCiphersParams) (HostnameTLSSettingCiphers, error) { + if rc.Identifier == "" { + return HostnameTLSSettingCiphers{}, ErrMissingZoneID + } + if params.Hostname == "" { + return HostnameTLSSettingCiphers{}, ErrMissingHostname + } + + uri := fmt.Sprintf("/zones/%s/hostnames/settings/ciphers/%s", rc.Identifier, params.Hostname) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return HostnameTLSSettingCiphers{}, err + } + // Unmarshal into HostnameTLSSettingResponse first because the API returns an empty string + var r HostnameTLSSettingResponse + if err := json.Unmarshal(res, &r); err != nil { + return HostnameTLSSettingCiphers{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return HostnameTLSSettingCiphers{ + Hostname: r.Result.Hostname, + Value: []string{}, + Status: r.Result.Status, + CreatedAt: r.Result.CreatedAt, + UpdatedAt: r.Result.UpdatedAt, + }, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/permission_group.go b/vendor/github.com/cloudflare/cloudflare-go/permission_group.go new file mode 100644 index 0000000..9fa7aa3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/permission_group.go @@ -0,0 +1,99 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type PermissionGroup struct { + ID string `json:"id"` + Name string `json:"name"` + Meta map[string]string `json:"meta"` + Permissions []Permission `json:"permissions"` +} + +type Permission struct { + ID string `json:"id"` + Key string `json:"key"` + Attributes map[string]string `json:"attributes,omitempty"` // same as Meta in other structs +} + +type PermissionGroupListResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result []PermissionGroup `json:"result"` +} + +type PermissionGroupDetailResponse struct { + Success bool `json:"success"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` + Result PermissionGroup `json:"result"` +} + +type ListPermissionGroupParams struct { + Depth int `url:"depth,omitempty"` + RoleName string 
`url:"name,omitempty"` +} + +const errMissingPermissionGroupID = "missing required permission group ID" + +var ErrMissingPermissionGroupID = errors.New(errMissingPermissionGroupID) + +// GetPermissionGroup returns a specific permission group from the API given +// the account ID and permission group ID. +func (api *API) GetPermissionGroup(ctx context.Context, rc *ResourceContainer, permissionGroupId string) (PermissionGroup, error) { + if rc.Level != AccountRouteLevel { + return PermissionGroup{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return PermissionGroup{}, ErrMissingAccountID + } + + if permissionGroupId == "" { + return PermissionGroup{}, ErrMissingPermissionGroupID + } + + uri := fmt.Sprintf("/accounts/%s/iam/permission_groups/%s?depth=2", rc.Identifier, permissionGroupId) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return PermissionGroup{}, err + } + + var permissionGroupResponse PermissionGroupDetailResponse + err = json.Unmarshal(res, &permissionGroupResponse) + if err != nil { + return PermissionGroup{}, err + } + + return permissionGroupResponse.Result, nil +} + +// ListPermissionGroups returns all valid permission groups for the provided +// parameters. +func (api *API) ListPermissionGroups(ctx context.Context, rc *ResourceContainer, params ListPermissionGroupParams) ([]PermissionGroup, error) { + if rc.Level != AccountRouteLevel { + return []PermissionGroup{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + params.Depth = 2 + uri := buildURI(fmt.Sprintf("/accounts/%s/iam/permission_groups", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []PermissionGroup{}, err + } + + var permissionGroupResponse PermissionGroupListResponse + err = json.Unmarshal(res, &permissionGroupResponse) + if err != nil { + return []PermissionGroup{}, err + } + + return permissionGroupResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/policy.go b/vendor/github.com/cloudflare/cloudflare-go/policy.go new file mode 100644 index 0000000..8077241 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/policy.go @@ -0,0 +1,8 @@ +package cloudflare + +type Policy struct { + ID string `json:"id"` + PermissionGroups []PermissionGroup `json:"permission_groups"` + ResourceGroups []ResourceGroup `json:"resource_groups"` + Access string `json:"access"` +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/queue.go b/vendor/github.com/cloudflare/cloudflare-go/queue.go new file mode 100644 index 0000000..9f6d6e1 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/queue.go @@ -0,0 +1,378 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingQueueName = errors.New("required queue name is missing") + ErrMissingQueueConsumerName = errors.New("required queue consumer name is missing") +) + +type Queue struct { + ID string `json:"queue_id,omitempty"` + Name string `json:"queue_name,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + ProducersTotalCount int `json:"producers_total_count,omitempty"` + Producers []QueueProducer `json:"producers,omitempty"` + ConsumersTotalCount int `json:"consumers_total_count,omitempty"` + Consumers []QueueConsumer `json:"consumers,omitempty"` +} + +type QueueProducer struct { + Service string 
`json:"service,omitempty"` + Environment string `json:"environment,omitempty"` +} + +type QueueConsumer struct { + Name string `json:"-"` + Service string `json:"service,omitempty"` + ScriptName string `json:"script_name,omitempty"` + Environment string `json:"environment,omitempty"` + Settings QueueConsumerSettings `json:"settings,omitempty"` + QueueName string `json:"queue_name,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + DeadLetterQueue string `json:"dead_letter_queue,omitempty"` +} + +type QueueConsumerSettings struct { + BatchSize int `json:"batch_size,omitempty"` + MaxRetires int `json:"max_retries,omitempty"` + MaxWaitTime int `json:"max_wait_time_ms,omitempty"` +} + +type QueueListResponse struct { + Response + ResultInfo `json:"result_info"` + Result []Queue `json:"result"` +} + +type CreateQueueParams struct { + Name string `json:"queue_name"` +} + +type QueueResponse struct { + Response + Result Queue `json:"result"` +} + +type ListQueueConsumersResponse struct { + Response + ResultInfo `json:"result_info"` + Result []QueueConsumer `json:"result"` +} + +type ListQueuesParams struct { + ResultInfo +} + +type QueueConsumerResponse struct { + Response + Result QueueConsumer `json:"result"` +} + +type UpdateQueueParams struct { + Name string `json:"-"` + UpdatedName string `json:"queue_name,omitempty"` +} + +type ListQueueConsumersParams struct { + QueueName string `url:"-"` + ResultInfo +} + +type CreateQueueConsumerParams struct { + QueueName string `json:"-"` + Consumer QueueConsumer +} + +type UpdateQueueConsumerParams struct { + QueueName string `json:"-"` + Consumer QueueConsumer +} + +type DeleteQueueConsumerParams struct { + QueueName, ConsumerName string +} + +// ListQueues returns the queues owned by an account. +// +// API reference: https://api.cloudflare.com/#queue-list-queues +func (api *API) ListQueues(ctx context.Context, rc *ResourceContainer, params ListQueuesParams) ([]Queue, *ResultInfo, error) { + if rc.Identifier == "" { + return []Queue{}, &ResultInfo{}, ErrMissingAccountID + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = 50 + } + if params.Page < 1 { + params.Page = 1 + } + + var queues []Queue + var qResponse QueueListResponse + for { + qResponse = QueueListResponse{} + uri := buildURI(fmt.Sprintf("/accounts/%s/workers/queues", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Queue{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &qResponse) + if err != nil { + return []Queue{}, &ResultInfo{}, fmt.Errorf("failed to unmarshal filters JSON data: %w", err) + } + + queues = append(queues, qResponse.Result...) + params.ResultInfo = qResponse.ResultInfo.Next() + + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return queues, &qResponse.ResultInfo, nil +} + +// CreateQueue creates a new queue. 
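+//
+// A minimal usage sketch; the account ID and queue name are placeholders:
+//
+//    q, err := api.CreateQueue(ctx, cloudflare.AccountIdentifier("<account-id>"),
+//        cloudflare.CreateQueueParams{Name: "example-queue"})
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(q.ID)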
+// +// API reference: https://api.cloudflare.com/#queue-create-queue +func (api *API) CreateQueue(ctx context.Context, rc *ResourceContainer, queue CreateQueueParams) (Queue, error) { + if rc.Identifier == "" { + return Queue{}, ErrMissingAccountID + } + + if queue.Name == "" { + return Queue{}, ErrMissingQueueName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, queue) + if err != nil { + return Queue{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r QueueResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Queue{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteQueue deletes a queue. +// +// API reference: https://api.cloudflare.com/#queue-delete-queue +func (api *API) DeleteQueue(ctx context.Context, rc *ResourceContainer, queueName string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + if queueName == "" { + return ErrMissingQueueName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues/%s", rc.Identifier, queueName) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + return nil +} + +// GetQueue returns a single queue based on the name. +// +// API reference: https://api.cloudflare.com/#queue-get-queue +func (api *API) GetQueue(ctx context.Context, rc *ResourceContainer, queueName string) (Queue, error) { + if rc.Identifier == "" { + return Queue{}, ErrMissingAccountID + } + + if queueName == "" { + return Queue{}, ErrMissingQueueName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues/%s", rc.Identifier, queueName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Queue{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r QueueResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Queue{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateQueue updates a queue. +// +// API reference: https://api.cloudflare.com/#queue-update-queue +func (api *API) UpdateQueue(ctx context.Context, rc *ResourceContainer, params UpdateQueueParams) (Queue, error) { + if rc.Identifier == "" { + return Queue{}, ErrMissingAccountID + } + + if params.Name == "" || params.UpdatedName == "" { + return Queue{}, ErrMissingQueueName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues/%s", rc.Identifier, params.Name) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return Queue{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r QueueResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Queue{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListQueueConsumers returns the consumers of a queue. 
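+//
+// A minimal usage sketch; the account ID and queue name are placeholders.
+// When Page and PerPage are left unset the call pages through all consumers
+// automatically:
+//
+//    consumers, _, err := api.ListQueueConsumers(ctx, cloudflare.AccountIdentifier("<account-id>"),
+//        cloudflare.ListQueueConsumersParams{QueueName: "example-queue"})
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, c := range consumers {
+//        fmt.Println(c.ScriptName)
+//    }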
+// +// API reference: https://api.cloudflare.com/#queue-list-queue-consumers +func (api *API) ListQueueConsumers(ctx context.Context, rc *ResourceContainer, params ListQueueConsumersParams) ([]QueueConsumer, *ResultInfo, error) { + if rc.Identifier == "" { + return []QueueConsumer{}, &ResultInfo{}, ErrMissingAccountID + } + + if params.QueueName == "" { + return []QueueConsumer{}, &ResultInfo{}, ErrMissingQueueName + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = 50 + } + if params.Page < 1 { + params.Page = 1 + } + + var queuesConsumers []QueueConsumer + var qResponse ListQueueConsumersResponse + for { + qResponse = ListQueueConsumersResponse{} + uri := buildURI(fmt.Sprintf("/accounts/%s/workers/queues/%s/consumers", rc.Identifier, params.QueueName), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []QueueConsumer{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &qResponse) + if err != nil { + return []QueueConsumer{}, &ResultInfo{}, fmt.Errorf("failed to unmarshal filters JSON data: %w", err) + } + + queuesConsumers = append(queuesConsumers, qResponse.Result...) + params.ResultInfo = qResponse.ResultInfo.Next() + + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return queuesConsumers, &qResponse.ResultInfo, nil +} + +// CreateQueueConsumer creates a new consumer for a queue. +// +// API reference: https://api.cloudflare.com/#queue-create-queue-consumer +func (api *API) CreateQueueConsumer(ctx context.Context, rc *ResourceContainer, params CreateQueueConsumerParams) (QueueConsumer, error) { + if rc.Identifier == "" { + return QueueConsumer{}, ErrMissingAccountID + } + + if params.QueueName == "" { + return QueueConsumer{}, ErrMissingQueueName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues/%s/consumers", rc.Identifier, params.QueueName) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Consumer) + if err != nil { + return QueueConsumer{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r QueueConsumerResponse + err = json.Unmarshal(res, &r) + if err != nil { + return QueueConsumer{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteQueueConsumer deletes the consumer for a queue.. +// +// API reference: https://api.cloudflare.com/#queue-delete-queue-consumer +func (api *API) DeleteQueueConsumer(ctx context.Context, rc *ResourceContainer, params DeleteQueueConsumerParams) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if params.QueueName == "" { + return ErrMissingQueueName + } + + if params.ConsumerName == "" { + return ErrMissingQueueConsumerName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues/%s/consumers/%s", rc.Identifier, params.QueueName, params.ConsumerName) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} + +// UpdateQueueConsumer updates the consumer for a queue, or creates one if it does not exist.. 
+// +// API reference: https://api.cloudflare.com/#queue-update-queue-consumer +func (api *API) UpdateQueueConsumer(ctx context.Context, rc *ResourceContainer, params UpdateQueueConsumerParams) (QueueConsumer, error) { + if rc.Identifier == "" { + return QueueConsumer{}, ErrMissingAccountID + } + + if params.QueueName == "" { + return QueueConsumer{}, ErrMissingQueueName + } + + if params.Consumer.Name == "" { + return QueueConsumer{}, ErrMissingQueueConsumerName + } + + uri := fmt.Sprintf("/accounts/%s/workers/queues/%s/consumers/%s", rc.Identifier, params.QueueName, params.Consumer.Name) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Consumer) + if err != nil { + return QueueConsumer{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r QueueConsumerResponse + err = json.Unmarshal(res, &r) + if err != nil { + return QueueConsumer{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/r2_bucket.go b/vendor/github.com/cloudflare/cloudflare-go/r2_bucket.go new file mode 100644 index 0000000..c9ce8fe --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/r2_bucket.go @@ -0,0 +1,147 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingBucketName = errors.New("require bucket name missing") +) + +// R2Bucket defines a container for objects stored in R2 Storage. +type R2Bucket struct { + Name string `json:"name"` + CreationDate *time.Time `json:"creation_date,omitempty"` + Location string `json:"location,omitempty"` +} + +// R2Buckets represents the map of buckets response from +// the R2 buckets endpoint. +type R2Buckets struct { + Buckets []R2Bucket `json:"buckets"` +} + +// R2BucketListResponse represents the response from the list +// R2 buckets endpoint. +type R2BucketListResponse struct { + Result R2Buckets `json:"result"` + Response +} + +type ListR2BucketsParams struct { + Name string `url:"name_contains,omitempty"` + StartAfter string `url:"start_after,omitempty"` + PerPage int64 `url:"per_page,omitempty"` + Order string `url:"order,omitempty"` + Direction string `url:"direction,omitempty"` + Cursor string `url:"cursor,omitempty"` +} + +type CreateR2BucketParameters struct { + Name string `json:"name,omitempty"` + LocationHint string `json:"locationHint,omitempty"` +} + +type R2BucketResponse struct { + Result R2Bucket `json:"result"` + Response +} + +// ListR2Buckets Lists R2 buckets. +func (api *API) ListR2Buckets(ctx context.Context, rc *ResourceContainer, params ListR2BucketsParams) ([]R2Bucket, error) { + if rc.Identifier == "" { + return []R2Bucket{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/r2/buckets", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []R2Bucket{}, err + } + + var r2BucketListResponse R2BucketListResponse + err = json.Unmarshal(res, &r2BucketListResponse) + if err != nil { + return []R2Bucket{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r2BucketListResponse.Result.Buckets, nil +} + +// CreateR2Bucket Creates a new R2 bucket. 
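+//
+// A minimal usage sketch; the account ID and bucket name are placeholders
+// and LocationHint is optional:
+//
+//    bucket, err := api.CreateR2Bucket(ctx, cloudflare.AccountIdentifier("<account-id>"),
+//        cloudflare.CreateR2BucketParameters{Name: "example-bucket"})
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(bucket.Name)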
+// +// API reference: https://api.cloudflare.com/#r2-bucket-create-bucket +func (api *API) CreateR2Bucket(ctx context.Context, rc *ResourceContainer, params CreateR2BucketParameters) (R2Bucket, error) { + if rc.Identifier == "" { + return R2Bucket{}, ErrMissingAccountID + } + + if params.Name == "" { + return R2Bucket{}, ErrMissingBucketName + } + + uri := fmt.Sprintf("/accounts/%s/r2/buckets", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return R2Bucket{}, err + } + + var r2BucketResponse R2BucketResponse + err = json.Unmarshal(res, &r2BucketResponse) + if err != nil { + return R2Bucket{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r2BucketResponse.Result, nil +} + +// GetR2Bucket Gets an existing R2 bucket. +// +// API reference: https://api.cloudflare.com/#r2-bucket-get-bucket +func (api *API) GetR2Bucket(ctx context.Context, rc *ResourceContainer, bucketName string) (R2Bucket, error) { + if rc.Identifier == "" { + return R2Bucket{}, ErrMissingAccountID + } + + if bucketName == "" { + return R2Bucket{}, ErrMissingBucketName + } + + uri := fmt.Sprintf("/accounts/%s/r2/buckets/%s", rc.Identifier, bucketName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return R2Bucket{}, err + } + + var r2BucketResponse R2BucketResponse + err = json.Unmarshal(res, &r2BucketResponse) + if err != nil { + return R2Bucket{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r2BucketResponse.Result, nil +} + +// DeleteR2Bucket Deletes an existing R2 bucket. +// +// API reference: https://api.cloudflare.com/#r2-bucket-delete-bucket +func (api *API) DeleteR2Bucket(ctx context.Context, rc *ResourceContainer, bucketName string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if bucketName == "" { + return ErrMissingBucketName + } + + uri := fmt.Sprintf("/accounts/%s/r2/buckets/%s", rc.Identifier, bucketName) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + return err +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/rate_limiting.go b/vendor/github.com/cloudflare/cloudflare-go/rate_limiting.go new file mode 100644 index 0000000..1a9f5cb --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/rate_limiting.go @@ -0,0 +1,199 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// RateLimit is a policy than can be applied to limit traffic within a customer domain. +type RateLimit struct { + ID string `json:"id,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Description string `json:"description,omitempty"` + Match RateLimitTrafficMatcher `json:"match"` + Bypass []RateLimitKeyValue `json:"bypass,omitempty"` + Threshold int `json:"threshold"` + Period int `json:"period"` + Action RateLimitAction `json:"action"` + Correlate *RateLimitCorrelate `json:"correlate,omitempty"` +} + +// RateLimitTrafficMatcher contains the rules that will be used to apply a rate limit to traffic. +type RateLimitTrafficMatcher struct { + Request RateLimitRequestMatcher `json:"request"` + Response RateLimitResponseMatcher `json:"response"` +} + +// RateLimitRequestMatcher contains the matching rules pertaining to requests. +type RateLimitRequestMatcher struct { + Methods []string `json:"methods,omitempty"` + Schemes []string `json:"schemes,omitempty"` + URLPattern string `json:"url,omitempty"` +} + +// RateLimitResponseMatcher contains the matching rules pertaining to responses. 
+type RateLimitResponseMatcher struct { + Statuses []int `json:"status,omitempty"` + OriginTraffic *bool `json:"origin_traffic,omitempty"` // api defaults to true so we need an explicit empty value + Headers []RateLimitResponseMatcherHeader `json:"headers,omitempty"` +} + +// RateLimitResponseMatcherHeader contains the structure of the origin +// HTTP headers used in request matcher checks. +type RateLimitResponseMatcherHeader struct { + Name string `json:"name"` + Op string `json:"op"` + Value string `json:"value"` +} + +// RateLimitKeyValue is k-v formatted as expected in the rate limit description. +type RateLimitKeyValue struct { + Name string `json:"name"` + Value string `json:"value"` +} + +// RateLimitAction is the action that will be taken when the rate limit threshold is reached. +type RateLimitAction struct { + Mode string `json:"mode"` + Timeout int `json:"timeout"` + Response *RateLimitActionResponse `json:"response"` +} + +// RateLimitActionResponse is the response that will be returned when rate limit action is triggered. +type RateLimitActionResponse struct { + ContentType string `json:"content_type"` + Body string `json:"body"` +} + +// RateLimitCorrelate pertainings to NAT support. +type RateLimitCorrelate struct { + By string `json:"by"` +} + +type rateLimitResponse struct { + Response + Result RateLimit `json:"result"` +} + +type rateLimitListResponse struct { + Response + Result []RateLimit `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// CreateRateLimit creates a new rate limit for a zone. +// +// API reference: https://api.cloudflare.com/#rate-limits-for-a-zone-create-a-ratelimit +func (api *API) CreateRateLimit(ctx context.Context, zoneID string, limit RateLimit) (RateLimit, error) { + uri := fmt.Sprintf("/zones/%s/rate_limits", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, limit) + if err != nil { + return RateLimit{}, err + } + var r rateLimitResponse + if err := json.Unmarshal(res, &r); err != nil { + return RateLimit{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListRateLimits returns Rate Limits for a zone, paginated according to the provided options +// +// API reference: https://api.cloudflare.com/#rate-limits-for-a-zone-list-rate-limits +func (api *API) ListRateLimits(ctx context.Context, zoneID string, pageOpts PaginationOptions) ([]RateLimit, ResultInfo, error) { + uri := buildURI(fmt.Sprintf("/zones/%s/rate_limits", zoneID), pageOpts) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []RateLimit{}, ResultInfo{}, err + } + + var r rateLimitListResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []RateLimit{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, r.ResultInfo, nil +} + +// ListAllRateLimits returns all Rate Limits for a zone. +// +// API reference: https://api.cloudflare.com/#rate-limits-for-a-zone-list-rate-limits +func (api *API) ListAllRateLimits(ctx context.Context, zoneID string) ([]RateLimit, error) { + pageOpts := PaginationOptions{ + PerPage: 100, // this is the max page size allowed + Page: 1, + } + + allRateLimits := make([]RateLimit, 0) + for { + rateLimits, resultInfo, err := api.ListRateLimits(ctx, zoneID, pageOpts) + if err != nil { + return []RateLimit{}, err + } + allRateLimits = append(allRateLimits, rateLimits...) 
+ // total pages is not returned on this call + // if number of records is less than the max, this must be the last page + // in case TotalCount % PerPage = 0, the last request will return an empty list + if resultInfo.Count < resultInfo.PerPage { + break + } + // continue with the next page + pageOpts.Page = pageOpts.Page + 1 + } + + return allRateLimits, nil +} + +// RateLimit fetches detail about one Rate Limit for a zone. +// +// API reference: https://api.cloudflare.com/#rate-limits-for-a-zone-rate-limit-details +func (api *API) RateLimit(ctx context.Context, zoneID, limitID string) (RateLimit, error) { + uri := fmt.Sprintf("/zones/%s/rate_limits/%s", zoneID, limitID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return RateLimit{}, err + } + var r rateLimitResponse + err = json.Unmarshal(res, &r) + if err != nil { + return RateLimit{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateRateLimit lets you replace a Rate Limit for a zone. +// +// API reference: https://api.cloudflare.com/#rate-limits-for-a-zone-update-rate-limit +func (api *API) UpdateRateLimit(ctx context.Context, zoneID, limitID string, limit RateLimit) (RateLimit, error) { + uri := fmt.Sprintf("/zones/%s/rate_limits/%s", zoneID, limitID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, limit) + if err != nil { + return RateLimit{}, err + } + var r rateLimitResponse + if err := json.Unmarshal(res, &r); err != nil { + return RateLimit{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteRateLimit deletes a Rate Limit for a zone. +// +// API reference: https://api.cloudflare.com/#rate-limits-for-a-zone-delete-rate-limit +func (api *API) DeleteRateLimit(ctx context.Context, zoneID, limitID string) error { + uri := fmt.Sprintf("/zones/%s/rate_limits/%s", zoneID, limitID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r rateLimitResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/regional_hostnames.go b/vendor/github.com/cloudflare/cloudflare-go/regional_hostnames.go new file mode 100644 index 0000000..5122770 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/regional_hostnames.go @@ -0,0 +1,191 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type Region struct { + Key string `json:"key"` + Label string `json:"label"` +} + +type RegionalHostname struct { + Hostname string `json:"hostname"` + RegionKey string `json:"region_key"` + CreatedOn *time.Time `json:"created_on,omitempty"` +} + +// regionalHostnameResponse contains an API Response from a Create, Get, Update, or Delete call. +type regionalHostnameResponse struct { + Response + Result RegionalHostname `json:"result"` +} + +type ListDataLocalizationRegionsParams struct{} +type ListDataLocalizationRegionalHostnamesParams struct{} + +type CreateDataLocalizationRegionalHostnameParams struct { + Hostname string `json:"hostname"` + RegionKey string `json:"region_key"` +} + +type UpdateDataLocalizationRegionalHostnameParams struct { + Hostname string `json:"-"` + RegionKey string `json:"region_key"` +} + +// ListDataLocalizationRegions lists all available regions. 
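+//
+// A minimal usage sketch; the resource container must be account level and
+// the account ID is a placeholder:
+//
+//    regions, err := api.ListDataLocalizationRegions(ctx, cloudflare.AccountIdentifier("<account-id>"),
+//        cloudflare.ListDataLocalizationRegionsParams{})
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, region := range regions {
+//        fmt.Println(region.Key, region.Label)
+//    }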
+//
+// API reference: https://developers.cloudflare.com/data-localization/regional-services/get-started/#configure-regional-services-via-api
+func (api *API) ListDataLocalizationRegions(ctx context.Context, rc *ResourceContainer, params ListDataLocalizationRegionsParams) ([]Region, error) {
+	if rc.Level != AccountRouteLevel {
+		return []Region{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if rc.Identifier == "" {
+		return []Region{}, ErrMissingAccountID
+	}
+
+	uri := fmt.Sprintf("/accounts/%s/addressing/regional_hostnames/regions", rc.Identifier)
+
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return []Region{}, err
+	}
+	result := struct {
+		Result []Region `json:"result"`
+	}{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return []Region{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return result.Result, nil
+}
+
+// ListDataLocalizationRegionalHostnames lists all regional hostnames for a zone.
+//
+// API reference: https://developers.cloudflare.com/data-localization/regional-services/get-started/#configure-regional-services-via-api
+func (api *API) ListDataLocalizationRegionalHostnames(ctx context.Context, rc *ResourceContainer, params ListDataLocalizationRegionalHostnamesParams) ([]RegionalHostname, error) {
+	if rc.Level != ZoneRouteLevel {
+		return []RegionalHostname{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if rc.Identifier == "" {
+		return []RegionalHostname{}, ErrMissingZoneID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/addressing/regional_hostnames", rc.Identifier)
+
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return []RegionalHostname{}, err
+	}
+	result := struct {
+		Result []RegionalHostname `json:"result"`
+	}{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return []RegionalHostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return result.Result, nil
+}
+
+// CreateDataLocalizationRegionalHostname creates a new regional hostname for a zone.
+//
+// API reference: https://developers.cloudflare.com/data-localization/regional-services/get-started/#configure-regional-services-via-api
+func (api *API) CreateDataLocalizationRegionalHostname(ctx context.Context, rc *ResourceContainer, params CreateDataLocalizationRegionalHostnameParams) (RegionalHostname, error) {
+	if rc.Level != ZoneRouteLevel {
+		return RegionalHostname{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if rc.Identifier == "" {
+		return RegionalHostname{}, ErrMissingZoneID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/addressing/regional_hostnames", rc.Identifier)
+
+	res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params)
+	if err != nil {
+		return RegionalHostname{}, err
+	}
+	result := regionalHostnameResponse{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return RegionalHostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return result.Result, nil
+}
+
+// GetDataLocalizationRegionalHostname returns the details of a specific regional hostname.
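+//
+// A minimal usage sketch; the zone ID and hostname are placeholders:
+//
+//    rh, err := api.GetDataLocalizationRegionalHostname(ctx, cloudflare.ZoneIdentifier("<zone-id>"), "eu.example.com")
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(rh.RegionKey)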
+//
+// API reference: https://developers.cloudflare.com/data-localization/regional-services/get-started/#configure-regional-services-via-api
+func (api *API) GetDataLocalizationRegionalHostname(ctx context.Context, rc *ResourceContainer, hostname string) (RegionalHostname, error) {
+	if rc.Level != ZoneRouteLevel {
+		return RegionalHostname{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if rc.Identifier == "" {
+		return RegionalHostname{}, ErrMissingZoneID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/addressing/regional_hostnames/%s", rc.Identifier, hostname)
+
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return RegionalHostname{}, err
+	}
+
+	result := regionalHostnameResponse{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return RegionalHostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return result.Result, nil
+}
+
+// UpdateDataLocalizationRegionalHostname updates the region key of an existing regional hostname.
+//
+// API reference: https://developers.cloudflare.com/data-localization/regional-services/get-started/#configure-regional-services-via-api
+func (api *API) UpdateDataLocalizationRegionalHostname(ctx context.Context, rc *ResourceContainer, params UpdateDataLocalizationRegionalHostnameParams) (RegionalHostname, error) {
+	if rc.Level != ZoneRouteLevel {
+		return RegionalHostname{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if rc.Identifier == "" {
+		return RegionalHostname{}, ErrMissingZoneID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/addressing/regional_hostnames/%s", rc.Identifier, params.Hostname)
+
+	res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params)
+	if err != nil {
+		return RegionalHostname{}, err
+	}
+	result := regionalHostnameResponse{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return RegionalHostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return result.Result, nil
+}
+
+// DeleteDataLocalizationRegionalHostname deletes a regional hostname.
+//
+// API reference: https://developers.cloudflare.com/data-localization/regional-services/get-started/#configure-regional-services-via-api
+func (api *API) DeleteDataLocalizationRegionalHostname(ctx context.Context, rc *ResourceContainer, hostname string) error {
+	if rc.Level != ZoneRouteLevel {
+		return fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	if rc.Identifier == "" {
+		return ErrMissingZoneID
+	}
+
+	uri := fmt.Sprintf("/zones/%s/addressing/regional_hostnames/%s", rc.Identifier, hostname)
+
+	_, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/cloudflare/cloudflare-go/regional_tiered_cache.go b/vendor/github.com/cloudflare/cloudflare-go/regional_tiered_cache.go
new file mode 100644
index 0000000..eb3254b
--- /dev/null
+++ b/vendor/github.com/cloudflare/cloudflare-go/regional_tiered_cache.go
@@ -0,0 +1,85 @@
+package cloudflare
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/goccy/go-json"
+)
+
+// RegionalTieredCache is the structure of the API object for the regional tiered cache
+// setting.
+type RegionalTieredCache struct {
+	ID         string    `json:"id,omitempty"`
+	ModifiedOn time.Time `json:"modified_on,omitempty"`
+	Value      string    `json:"value"`
+}
+
+// RegionalTieredCacheDetailsResponse is the API response for the regional tiered cache
+// setting.
+type RegionalTieredCacheDetailsResponse struct { + Result RegionalTieredCache `json:"result"` + Response +} + +type zoneRegionalTieredCacheSingleResponse struct { + Response + Result RegionalTieredCache `json:"result"` +} + +type GetRegionalTieredCacheParams struct{} + +type UpdateRegionalTieredCacheParams struct { + Value string `json:"value"` +} + +// GetRegionalTieredCache returns information about the current regional tiered +// cache settings. +// +// API reference: https://developers.cloudflare.com/api/operations/zone-cache-settings-get-regional-tiered-cache-setting +func (api *API) GetRegionalTieredCache(ctx context.Context, rc *ResourceContainer, params GetRegionalTieredCacheParams) (RegionalTieredCache, error) { + if rc.Level != ZoneRouteLevel { + return RegionalTieredCache{}, ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/cache/regional_tiered_cache", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return RegionalTieredCache{}, err + } + + var RegionalTieredCacheDetailsResponse RegionalTieredCacheDetailsResponse + err = json.Unmarshal(res, &RegionalTieredCacheDetailsResponse) + if err != nil { + return RegionalTieredCache{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return RegionalTieredCacheDetailsResponse.Result, nil +} + +// UpdateRegionalTieredCache updates the regional tiered cache setting for a +// zone. +// +// API reference: https://developers.cloudflare.com/api/operations/zone-cache-settings-change-regional-tiered-cache-setting +func (api *API) UpdateRegionalTieredCache(ctx context.Context, rc *ResourceContainer, params UpdateRegionalTieredCacheParams) (RegionalTieredCache, error) { + if rc.Level != ZoneRouteLevel { + return RegionalTieredCache{}, ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/%s/%s/cache/regional_tiered_cache", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return RegionalTieredCache{}, err + } + + response := &zoneRegionalTieredCacheSingleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return RegionalTieredCache{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/registrar.go b/vendor/github.com/cloudflare/cloudflare-go/registrar.go new file mode 100644 index 0000000..68144ac --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/registrar.go @@ -0,0 +1,176 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// RegistrarDomain is the structure of the API response for a new +// Cloudflare Registrar domain. +type RegistrarDomain struct { + ID string `json:"id"` + Available bool `json:"available"` + SupportedTLD bool `json:"supported_tld"` + CanRegister bool `json:"can_register"` + TransferIn RegistrarTransferIn `json:"transfer_in"` + CurrentRegistrar string `json:"current_registrar"` + ExpiresAt time.Time `json:"expires_at"` + RegistryStatuses string `json:"registry_statuses"` + Locked bool `json:"locked"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + RegistrantContact RegistrantContact `json:"registrant_contact"` +} + +// RegistrarTransferIn contains the structure for a domain transfer in +// request. 
+type RegistrarTransferIn struct { + UnlockDomain string `json:"unlock_domain"` + DisablePrivacy string `json:"disable_privacy"` + EnterAuthCode string `json:"enter_auth_code"` + ApproveTransfer string `json:"approve_transfer"` + AcceptFoa string `json:"accept_foa"` + CanCancelTransfer bool `json:"can_cancel_transfer"` +} + +// RegistrantContact is the contact details for the domain registration. +type RegistrantContact struct { + ID string `json:"id"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Organization string `json:"organization"` + Address string `json:"address"` + Address2 string `json:"address2"` + City string `json:"city"` + State string `json:"state"` + Zip string `json:"zip"` + Country string `json:"country"` + Phone string `json:"phone"` + Email string `json:"email"` + Fax string `json:"fax"` +} + +// RegistrarDomainConfiguration is the structure for making updates to +// and existing domain. +type RegistrarDomainConfiguration struct { + NameServers []string `json:"name_servers"` + Privacy bool `json:"privacy"` + Locked bool `json:"locked"` + AutoRenew bool `json:"auto_renew"` +} + +// RegistrarDomainDetailResponse is the structure of the detailed +// response from the API for a single domain. +type RegistrarDomainDetailResponse struct { + Response + Result RegistrarDomain `json:"result"` +} + +// RegistrarDomainsDetailResponse is the structure of the detailed +// response from the API. +type RegistrarDomainsDetailResponse struct { + Response + Result []RegistrarDomain `json:"result"` +} + +// RegistrarDomain returns a single domain based on the account ID and +// domain name. +// +// API reference: https://api.cloudflare.com/#registrar-domains-get-domain +func (api *API) RegistrarDomain(ctx context.Context, accountID, domainName string) (RegistrarDomain, error) { + uri := fmt.Sprintf("/accounts/%s/registrar/domains/%s", accountID, domainName) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return RegistrarDomain{}, err + } + + var r RegistrarDomainDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return RegistrarDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// RegistrarDomains returns all registrar domains based on the account +// ID. +// +// API reference: https://api.cloudflare.com/#registrar-domains-list-domains +func (api *API) RegistrarDomains(ctx context.Context, accountID string) ([]RegistrarDomain, error) { + uri := fmt.Sprintf("/accounts/%s/registrar/domains", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []RegistrarDomain{}, err + } + + var r RegistrarDomainsDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []RegistrarDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// TransferRegistrarDomain initiates the transfer from another registrar +// to Cloudflare Registrar. 
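+//
+// A minimal usage sketch; the account ID and domain name are placeholders:
+//
+//    domains, err := api.TransferRegistrarDomain(ctx, "<account-id>", "example.com")
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, d := range domains {
+//        fmt.Println(d.ID, d.CurrentRegistrar)
+//    }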
+// +// API reference: https://api.cloudflare.com/#registrar-domains-transfer-domain +func (api *API) TransferRegistrarDomain(ctx context.Context, accountID, domainName string) ([]RegistrarDomain, error) { + uri := fmt.Sprintf("/accounts/%s/registrar/domains/%s/transfer", accountID, domainName) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return []RegistrarDomain{}, err + } + + var r RegistrarDomainsDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []RegistrarDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CancelRegistrarDomainTransfer cancels a pending domain transfer. +// +// API reference: https://api.cloudflare.com/#registrar-domains-cancel-transfer +func (api *API) CancelRegistrarDomainTransfer(ctx context.Context, accountID, domainName string) ([]RegistrarDomain, error) { + uri := fmt.Sprintf("/accounts/%s/registrar/domains/%s/cancel_transfer", accountID, domainName) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return []RegistrarDomain{}, err + } + + var r RegistrarDomainsDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []RegistrarDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateRegistrarDomain updates an existing Registrar Domain configuration. +// +// API reference: https://api.cloudflare.com/#registrar-domains-update-domain +func (api *API) UpdateRegistrarDomain(ctx context.Context, accountID, domainName string, domainConfiguration RegistrarDomainConfiguration) (RegistrarDomain, error) { + uri := fmt.Sprintf("/accounts/%s/registrar/domains/%s", accountID, domainName) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, domainConfiguration) + if err != nil { + return RegistrarDomain{}, err + } + + var r RegistrarDomainDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return RegistrarDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/resource.go b/vendor/github.com/cloudflare/cloudflare-go/resource.go new file mode 100644 index 0000000..d781975 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/resource.go @@ -0,0 +1,114 @@ +package cloudflare + +import "fmt" + +// RouteLevel holds the "level" where the resource resides. Commonly used in +// routing configurations or builders. +type RouteLevel string + +// ResourceType holds the type of the resource. This is similar to `RouteLevel` +// however this is the singular version of `RouteLevel` and isn't suitable for +// use in routing. +type ResourceType string + +const ( + user = "user" + zone = "zone" + account = "account" + + zones = zone + "s" + accounts = account + "s" + + AccountRouteLevel RouteLevel = accounts + ZoneRouteLevel RouteLevel = zones + UserRouteLevel RouteLevel = user + + AccountType ResourceType = account + ZoneType ResourceType = zone + UserType ResourceType = user +) + +// ResourceContainer defines an API resource you wish to target. Should not be +// used directly, use `UserIdentifier`, `ZoneIdentifier` and `AccountIdentifier` +// instead. 
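+//
+// For example (the IDs are placeholders), build a container with one of the
+// helper constructors and pass it as the `rc` argument of a scoped method;
+// ZoneIdentifier is used the same way for zone-scoped calls:
+//
+//    accountRC := cloudflare.AccountIdentifier("<account-id>")
+//    buckets, err := api.ListR2Buckets(ctx, accountRC, cloudflare.ListR2BucketsParams{})
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(len(buckets))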
+type ResourceContainer struct { + Level RouteLevel + Identifier string + Type ResourceType +} + +func (r RouteLevel) String() string { + switch r { + case AccountRouteLevel: + return accounts + case ZoneRouteLevel: + return zones + case UserRouteLevel: + return user + default: + return "unknown" + } +} + +func (r ResourceType) String() string { + switch r { + case AccountType: + return account + case ZoneType: + return zone + case UserType: + return user + default: + return "unknown" + } +} + +// Returns a URL fragment of the endpoint scoped by the container. +// +// For example, a zone identifier would have a fragment like "zones/foobar" while +// an account identifier would generate "accounts/foobar". +func (rc *ResourceContainer) URLFragment() string { + if rc.Level == "" { + return rc.Identifier + } + + if rc.Level == UserRouteLevel { + return user + } + + return fmt.Sprintf("%s/%s", rc.Level, rc.Identifier) +} + +// ResourceIdentifier returns a generic *ResourceContainer. +func ResourceIdentifier(id string) *ResourceContainer { + return &ResourceContainer{ + Identifier: id, + } +} + +// UserIdentifier returns a user level *ResourceContainer. +func UserIdentifier(id string) *ResourceContainer { + return &ResourceContainer{ + Level: UserRouteLevel, + Identifier: id, + Type: UserType, + } +} + +// ZoneIdentifier returns a zone level *ResourceContainer. +func ZoneIdentifier(id string) *ResourceContainer { + return &ResourceContainer{ + Level: ZoneRouteLevel, + Identifier: id, + Type: ZoneType, + } +} + +// AccountIdentifier returns an account level *ResourceContainer. +func AccountIdentifier(id string) *ResourceContainer { + return &ResourceContainer{ + Level: AccountRouteLevel, + Identifier: id, + Type: AccountType, + } +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/resource_group.go b/vendor/github.com/cloudflare/cloudflare-go/resource_group.go new file mode 100644 index 0000000..6d25ffb --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/resource_group.go @@ -0,0 +1,53 @@ +package cloudflare + +import "fmt" + +type ResourceGroup struct { + ID string `json:"id"` + Name string `json:"name"` + Meta map[string]string `json:"meta"` + Scope Scope `json:"scope"` +} + +type Scope struct { + Key string `json:"key"` + ScopeObjects []ScopeObject `json:"objects"` +} + +type ScopeObject struct { + Key string `json:"key"` +} + +// NewResourceGroupForZone takes an existing zone and provides a resource group +// to be used within a Policy that allows access to that zone. +func NewResourceGroupForZone(zone Zone) ResourceGroup { + return NewResourceGroup(fmt.Sprintf("com.cloudflare.api.account.zone.%s", zone.ID)) +} + +// NewResourceGroupForAccount takes an existing zone and provides a resource group +// to be used within a Policy that allows access to that account. +func NewResourceGroupForAccount(account Account) ResourceGroup { + return NewResourceGroup(fmt.Sprintf("com.cloudflare.api.account.%s", account.ID)) +} + +// NewResourceGroup takes a Cloudflare-formatted key (e.g. 'com.cloudflare.api.%s') and +// returns a resource group to be used within a Policy to allow access to that resource. 
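+//
+// For example (a sketch; the zone tag is a placeholder):
+//
+//	rg := NewResourceGroup("com.cloudflare.api.account.zone.example-zone-id")
+//	// rg.Name and rg.Scope.Key are set to the key, and rg.Scope.ScopeObjects
+//	// contains a single wildcard object ({Key: "*"}).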
+func NewResourceGroup(key string) ResourceGroup { + scope := Scope{ + Key: key, + ScopeObjects: []ScopeObject{ + { + Key: "*", + }, + }, + } + resourceGroup := ResourceGroup{ + ID: "", + Name: key, + Meta: map[string]string{ + "editable": "false", + }, + Scope: scope, + } + return resourceGroup +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/rulesets.go b/vendor/github.com/cloudflare/cloudflare-go/rulesets.go new file mode 100644 index 0000000..1573beb --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/rulesets.go @@ -0,0 +1,915 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingRulesetPhase = errors.New("missing required phase") +) + +const ( + RulesetKindCustom RulesetKind = "custom" + RulesetKindManaged RulesetKind = "managed" + RulesetKindRoot RulesetKind = "root" + RulesetKindZone RulesetKind = "zone" + + RulesetPhaseDDoSL4 RulesetPhase = "ddos_l4" + RulesetPhaseDDoSL7 RulesetPhase = "ddos_l7" + RulesetPhaseHTTPConfigSettings RulesetPhase = "http_config_settings" + RulesetPhaseHTTPCustomErrors RulesetPhase = "http_custom_errors" + RulesetPhaseHTTPLogCustomFields RulesetPhase = "http_log_custom_fields" + RulesetPhaseHTTPRatelimit RulesetPhase = "http_ratelimit" + RulesetPhaseHTTPRequestCacheSettings RulesetPhase = "http_request_cache_settings" + RulesetPhaseHTTPRequestDynamicRedirect RulesetPhase = "http_request_dynamic_redirect" //nolint:gosec + RulesetPhaseHTTPRequestFirewallCustom RulesetPhase = "http_request_firewall_custom" + RulesetPhaseHTTPRequestFirewallManaged RulesetPhase = "http_request_firewall_managed" + RulesetPhaseHTTPRequestLateTransform RulesetPhase = "http_request_late_transform" + RulesetPhaseHTTPRequestOrigin RulesetPhase = "http_request_origin" + RulesetPhaseHTTPRequestRedirect RulesetPhase = "http_request_redirect" + RulesetPhaseHTTPRequestSanitize RulesetPhase = "http_request_sanitize" + RulesetPhaseHTTPRequestSBFM RulesetPhase = "http_request_sbfm" + RulesetPhaseHTTPRequestTransform RulesetPhase = "http_request_transform" + RulesetPhaseHTTPResponseCompression RulesetPhase = "http_response_compression" + RulesetPhaseHTTPResponseFirewallManaged RulesetPhase = "http_response_firewall_managed" + RulesetPhaseHTTPResponseHeadersTransform RulesetPhase = "http_response_headers_transform" + RulesetPhaseMagicTransit RulesetPhase = "magic_transit" + + RulesetRuleActionBlock RulesetRuleAction = "block" + RulesetRuleActionChallenge RulesetRuleAction = "challenge" + RulesetRuleActionCompressResponse RulesetRuleAction = "compress_response" + RulesetRuleActionDDoSDynamic RulesetRuleAction = "ddos_dynamic" + RulesetRuleActionDDoSMitigation RulesetRuleAction = "ddos_mitigation" + RulesetRuleActionExecute RulesetRuleAction = "execute" + RulesetRuleActionForceConnectionClose RulesetRuleAction = "force_connection_close" + RulesetRuleActionJSChallenge RulesetRuleAction = "js_challenge" + RulesetRuleActionLog RulesetRuleAction = "log" + RulesetRuleActionLogCustomField RulesetRuleAction = "log_custom_field" + RulesetRuleActionManagedChallenge RulesetRuleAction = "managed_challenge" + RulesetRuleActionRedirect RulesetRuleAction = "redirect" + RulesetRuleActionRewrite RulesetRuleAction = "rewrite" + RulesetRuleActionRoute RulesetRuleAction = "route" + RulesetRuleActionScore RulesetRuleAction = "score" + RulesetRuleActionServeError RulesetRuleAction = "serve_error" + RulesetRuleActionSetCacheSettings RulesetRuleAction = "set_cache_settings" + 
RulesetRuleActionSetConfig RulesetRuleAction = "set_config" + RulesetRuleActionSkip RulesetRuleAction = "skip" + + RulesetActionParameterProductBIC RulesetActionParameterProduct = "bic" + RulesetActionParameterProductHOT RulesetActionParameterProduct = "hot" + RulesetActionParameterProductRateLimit RulesetActionParameterProduct = "ratelimit" + RulesetActionParameterProductSecurityLevel RulesetActionParameterProduct = "securityLevel" + RulesetActionParameterProductUABlock RulesetActionParameterProduct = "uablock" + RulesetActionParameterProductWAF RulesetActionParameterProduct = "waf" + RulesetActionParameterProductZoneLockdown RulesetActionParameterProduct = "zonelockdown" + + RulesetRuleActionParametersHTTPHeaderOperationRemove RulesetRuleActionParametersHTTPHeaderOperation = "remove" + RulesetRuleActionParametersHTTPHeaderOperationSet RulesetRuleActionParametersHTTPHeaderOperation = "set" + RulesetRuleActionParametersHTTPHeaderOperationAdd RulesetRuleActionParametersHTTPHeaderOperation = "add" +) + +// RulesetKindValues exposes all the available `RulesetKind` values as a slice +// of strings. +func RulesetKindValues() []string { + return []string{ + string(RulesetKindCustom), + string(RulesetKindManaged), + string(RulesetKindRoot), + string(RulesetKindZone), + } +} + +// RulesetPhaseValues exposes all the available `RulesetPhase` values as a slice +// of strings. +func RulesetPhaseValues() []string { + return []string{ + string(RulesetPhaseDDoSL4), + string(RulesetPhaseDDoSL7), + string(RulesetPhaseHTTPConfigSettings), + string(RulesetPhaseHTTPCustomErrors), + string(RulesetPhaseHTTPLogCustomFields), + string(RulesetPhaseHTTPRatelimit), + string(RulesetPhaseHTTPRequestCacheSettings), + string(RulesetPhaseHTTPRequestDynamicRedirect), + string(RulesetPhaseHTTPRequestFirewallCustom), + string(RulesetPhaseHTTPRequestFirewallManaged), + string(RulesetPhaseHTTPRequestLateTransform), + string(RulesetPhaseHTTPRequestOrigin), + string(RulesetPhaseHTTPRequestRedirect), + string(RulesetPhaseHTTPRequestSanitize), + string(RulesetPhaseHTTPRequestSBFM), + string(RulesetPhaseHTTPRequestTransform), + string(RulesetPhaseHTTPResponseCompression), + string(RulesetPhaseHTTPResponseFirewallManaged), + string(RulesetPhaseHTTPResponseHeadersTransform), + string(RulesetPhaseMagicTransit), + } +} + +// RulesetRuleActionValues exposes all the available `RulesetRuleAction` values +// as a slice of strings. +func RulesetRuleActionValues() []string { + return []string{ + string(RulesetRuleActionBlock), + string(RulesetRuleActionChallenge), + string(RulesetRuleActionCompressResponse), + string(RulesetRuleActionDDoSDynamic), + string(RulesetRuleActionDDoSMitigation), + string(RulesetRuleActionExecute), + string(RulesetRuleActionForceConnectionClose), + string(RulesetRuleActionJSChallenge), + string(RulesetRuleActionLog), + string(RulesetRuleActionLogCustomField), + string(RulesetRuleActionManagedChallenge), + string(RulesetRuleActionRedirect), + string(RulesetRuleActionRewrite), + string(RulesetRuleActionRoute), + string(RulesetRuleActionScore), + string(RulesetRuleActionServeError), + string(RulesetRuleActionSetCacheSettings), + string(RulesetRuleActionSetConfig), + string(RulesetRuleActionSkip), + } +} + +// RulesetActionParameterProductValues exposes all the available +// `RulesetActionParameterProduct` values as a slice of strings. 
+func RulesetActionParameterProductValues() []string { + return []string{ + string(RulesetActionParameterProductBIC), + string(RulesetActionParameterProductHOT), + string(RulesetActionParameterProductRateLimit), + string(RulesetActionParameterProductSecurityLevel), + string(RulesetActionParameterProductUABlock), + string(RulesetActionParameterProductWAF), + string(RulesetActionParameterProductZoneLockdown), + } +} + +func RulesetRuleActionParametersHTTPHeaderOperationValues() []string { + return []string{ + string(RulesetRuleActionParametersHTTPHeaderOperationRemove), + string(RulesetRuleActionParametersHTTPHeaderOperationSet), + string(RulesetRuleActionParametersHTTPHeaderOperationAdd), + } +} + +// RulesetRuleAction defines a custom type that is used to express allowed +// values for the rule action. +type RulesetRuleAction string + +// RulesetKind is the custom type for allowed variances of rulesets. +type RulesetKind string + +// RulesetPhase is the custom type for defining at what point the ruleset will +// be applied in the request pipeline. +type RulesetPhase string + +// RulesetActionParameterProduct is the custom type for defining what products +// can be used within the action parameters of a ruleset. +type RulesetActionParameterProduct string + +// RulesetRuleActionParametersHTTPHeaderOperation defines available options for +// HTTP header operations in actions. +type RulesetRuleActionParametersHTTPHeaderOperation string + +// Ruleset contains the structure of a Ruleset. Using `string` for Kind and +// Phase is a developer nicety to support downstream clients like Terraform who +// don't really have a strong and expansive type system. As always, the +// recommendation is to use the types provided where possible to avoid +// surprises. +type Ruleset struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Kind string `json:"kind,omitempty"` + Version *string `json:"version,omitempty"` + LastUpdated *time.Time `json:"last_updated,omitempty"` + Phase string `json:"phase,omitempty"` + Rules []RulesetRule `json:"rules"` + ShareableEntitlementName string `json:"shareable_entitlement_name,omitempty"` +} + +// RulesetActionParametersLogCustomField wraps an object that is part of +// request_fields, response_fields or cookie_fields. +type RulesetActionParametersLogCustomField struct { + Name string `json:"name,omitempty"` +} + +// RulesetRuleActionParameters specifies the action parameters for a Ruleset +// rule. 
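+//
+// A sketch of the parameters for a block rule that serves a custom response
+// (all values are placeholders):
+//
+//	params := RulesetRuleActionParameters{
+//		Response: &RulesetRuleActionParametersBlockResponse{
+//			StatusCode:  403,
+//			ContentType: "text/plain",
+//			Content:     "blocked",
+//		},
+//	}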
+type RulesetRuleActionParameters struct { + ID string `json:"id,omitempty"` + Ruleset string `json:"ruleset,omitempty"` + Rulesets []string `json:"rulesets,omitempty"` + Rules map[string][]string `json:"rules,omitempty"` + Increment int `json:"increment,omitempty"` + URI *RulesetRuleActionParametersURI `json:"uri,omitempty"` + Headers map[string]RulesetRuleActionParametersHTTPHeader `json:"headers,omitempty"` + Products []string `json:"products,omitempty"` + Phases []string `json:"phases,omitempty"` + Overrides *RulesetRuleActionParametersOverrides `json:"overrides,omitempty"` + MatchedData *RulesetRuleActionParametersMatchedData `json:"matched_data,omitempty"` + Version *string `json:"version,omitempty"` + Response *RulesetRuleActionParametersBlockResponse `json:"response,omitempty"` + HostHeader string `json:"host_header,omitempty"` + Origin *RulesetRuleActionParametersOrigin `json:"origin,omitempty"` + SNI *RulesetRuleActionParametersSni `json:"sni,omitempty"` + RequestFields []RulesetActionParametersLogCustomField `json:"request_fields,omitempty"` + ResponseFields []RulesetActionParametersLogCustomField `json:"response_fields,omitempty"` + CookieFields []RulesetActionParametersLogCustomField `json:"cookie_fields,omitempty"` + Cache *bool `json:"cache,omitempty"` + AdditionalCacheablePorts []int `json:"additional_cacheable_ports,omitempty"` + EdgeTTL *RulesetRuleActionParametersEdgeTTL `json:"edge_ttl,omitempty"` + BrowserTTL *RulesetRuleActionParametersBrowserTTL `json:"browser_ttl,omitempty"` + ServeStale *RulesetRuleActionParametersServeStale `json:"serve_stale,omitempty"` + Content string `json:"content,omitempty"` + ContentType string `json:"content_type,omitempty"` + StatusCode uint16 `json:"status_code,omitempty"` + RespectStrongETags *bool `json:"respect_strong_etags,omitempty"` + CacheKey *RulesetRuleActionParametersCacheKey `json:"cache_key,omitempty"` + OriginCacheControl *bool `json:"origin_cache_control,omitempty"` + OriginErrorPagePassthru *bool `json:"origin_error_page_passthru,omitempty"` + CacheReserve *RulesetRuleActionParametersCacheReserve `json:"cache_reserve,omitempty"` + FromList *RulesetRuleActionParametersFromList `json:"from_list,omitempty"` + FromValue *RulesetRuleActionParametersFromValue `json:"from_value,omitempty"` + AutomaticHTTPSRewrites *bool `json:"automatic_https_rewrites,omitempty"` + AutoMinify *RulesetRuleActionParametersAutoMinify `json:"autominify,omitempty"` + BrowserIntegrityCheck *bool `json:"bic,omitempty"` + DisableApps *bool `json:"disable_apps,omitempty"` + DisableZaraz *bool `json:"disable_zaraz,omitempty"` + DisableRailgun *bool `json:"disable_railgun,omitempty"` + DisableRUM *bool `json:"disable_rum,omitempty"` + EmailObfuscation *bool `json:"email_obfuscation,omitempty"` + Fonts *bool `json:"fonts,omitempty"` + Mirage *bool `json:"mirage,omitempty"` + OpportunisticEncryption *bool `json:"opportunistic_encryption,omitempty"` + Polish *Polish `json:"polish,omitempty"` + ReadTimeout *uint `json:"read_timeout,omitempty"` + RocketLoader *bool `json:"rocket_loader,omitempty"` + SecurityLevel *SecurityLevel `json:"security_level,omitempty"` + ServerSideExcludes *bool `json:"server_side_excludes,omitempty"` + SSL *SSL `json:"ssl,omitempty"` + SXG *bool `json:"sxg,omitempty"` + HotLinkProtection *bool `json:"hotlink_protection,omitempty"` + Algorithms []RulesetRuleActionParametersCompressionAlgorithm `json:"algorithms,omitempty"` +} + +// RulesetRuleActionParametersFromList holds the FromList struct for +// actions which pull data from a list. 
+type RulesetRuleActionParametersFromList struct { + Name string `json:"name"` + Key string `json:"key"` +} + +type RulesetRuleActionParametersAutoMinify struct { + HTML bool `json:"html"` + CSS bool `json:"css"` + JS bool `json:"js"` +} + +type RulesetRuleActionParametersFromValue struct { + StatusCode uint16 `json:"status_code,omitempty"` + TargetURL RulesetRuleActionParametersTargetURL `json:"target_url"` + PreserveQueryString *bool `json:"preserve_query_string,omitempty"` +} + +type RulesetRuleActionParametersTargetURL struct { + Value string `json:"value,omitempty"` + Expression string `json:"expression,omitempty"` +} + +type RulesetRuleActionParametersEdgeTTL struct { + Mode string `json:"mode,omitempty"` + Default *uint `json:"default,omitempty"` + StatusCodeTTL []RulesetRuleActionParametersStatusCodeTTL `json:"status_code_ttl,omitempty"` +} + +type RulesetRuleActionParametersStatusCodeTTL struct { + StatusCodeRange *RulesetRuleActionParametersStatusCodeRange `json:"status_code_range,omitempty"` + StatusCodeValue *uint `json:"status_code,omitempty"` + Value *int `json:"value,omitempty"` +} + +type RulesetRuleActionParametersStatusCodeRange struct { + From *uint `json:"from,omitempty"` + To *uint `json:"to,omitempty"` +} + +type RulesetRuleActionParametersBrowserTTL struct { + Mode string `json:"mode"` + Default *uint `json:"default,omitempty"` +} + +type RulesetRuleActionParametersServeStale struct { + DisableStaleWhileUpdating *bool `json:"disable_stale_while_updating,omitempty"` +} + +type RulesetRuleActionParametersCacheKey struct { + CacheByDeviceType *bool `json:"cache_by_device_type,omitempty"` + IgnoreQueryStringsOrder *bool `json:"ignore_query_strings_order,omitempty"` + CacheDeceptionArmor *bool `json:"cache_deception_armor,omitempty"` + CustomKey *RulesetRuleActionParametersCustomKey `json:"custom_key,omitempty"` +} + +type RulesetRuleActionParametersCacheReserve struct { + Eligible *bool `json:"eligible,omitempty"` + MinimumFileSize *uint `json:"minimum_file_size,omitempty"` +} + +type RulesetRuleActionParametersCustomKey struct { + Query *RulesetRuleActionParametersCustomKeyQuery `json:"query_string,omitempty"` + Header *RulesetRuleActionParametersCustomKeyHeader `json:"header,omitempty"` + Cookie *RulesetRuleActionParametersCustomKeyCookie `json:"cookie,omitempty"` + User *RulesetRuleActionParametersCustomKeyUser `json:"user,omitempty"` + Host *RulesetRuleActionParametersCustomKeyHost `json:"host,omitempty"` +} + +type RulesetRuleActionParametersCustomKeyHeader struct { + RulesetRuleActionParametersCustomKeyFields + ExcludeOrigin *bool `json:"exclude_origin,omitempty"` + Contains map[string][]string `json:"contains,omitempty"` +} + +type RulesetRuleActionParametersCustomKeyCookie RulesetRuleActionParametersCustomKeyFields + +type RulesetRuleActionParametersCustomKeyFields struct { + Include []string `json:"include,omitempty"` + CheckPresence []string `json:"check_presence,omitempty"` +} + +type RulesetRuleActionParametersCustomKeyQuery struct { + Include *RulesetRuleActionParametersCustomKeyList `json:"include,omitempty"` + Exclude *RulesetRuleActionParametersCustomKeyList `json:"exclude,omitempty"` + Ignore *bool `json:"ignore,omitempty"` +} + +type RulesetRuleActionParametersCustomKeyList struct { + List []string + All bool +} + +func (s *RulesetRuleActionParametersCustomKeyList) UnmarshalJSON(data []byte) error { + var all string + if err := json.Unmarshal(data, &all); err == nil { + s.All = all == "*" + return nil + } + var list []string + if err := 
json.Unmarshal(data, &list); err == nil { + s.List = list + } + + return nil +} + +func (s RulesetRuleActionParametersCustomKeyList) MarshalJSON() ([]byte, error) { + if s.All { + return json.Marshal("*") + } + return json.Marshal(s.List) +} + +type RulesetRuleActionParametersCustomKeyUser struct { + DeviceType *bool `json:"device_type,omitempty"` + Geo *bool `json:"geo,omitempty"` + Lang *bool `json:"lang,omitempty"` +} + +type RulesetRuleActionParametersCustomKeyHost struct { + Resolved *bool `json:"resolved,omitempty"` +} + +// RulesetRuleActionParametersBlockResponse holds the BlockResponse struct +// for an action parameter. +type RulesetRuleActionParametersBlockResponse struct { + StatusCode uint16 `json:"status_code"` + ContentType string `json:"content_type"` + Content string `json:"content"` +} + +// RulesetRuleActionParametersURI holds the URI struct for an action parameter. +type RulesetRuleActionParametersURI struct { + Path *RulesetRuleActionParametersURIPath `json:"path,omitempty"` + Query *RulesetRuleActionParametersURIQuery `json:"query,omitempty"` + Origin *bool `json:"origin,omitempty"` +} + +// RulesetRuleActionParametersURIPath holds the path specific portion of a URI +// action parameter. +type RulesetRuleActionParametersURIPath struct { + Value string `json:"value,omitempty"` + Expression string `json:"expression,omitempty"` +} + +// RulesetRuleActionParametersURIQuery holds the query specific portion of a URI +// action parameter. +type RulesetRuleActionParametersURIQuery struct { + Value *string `json:"value,omitempty"` + Expression string `json:"expression,omitempty"` +} + +// RulesetRuleActionParametersHTTPHeader is the definition for define action +// parameters that involve HTTP headers. +type RulesetRuleActionParametersHTTPHeader struct { + Operation string `json:"operation,omitempty"` + Value string `json:"value,omitempty"` + Expression string `json:"expression,omitempty"` +} + +type RulesetRuleActionParametersOverrides struct { + Enabled *bool `json:"enabled,omitempty"` + Action string `json:"action,omitempty"` + SensitivityLevel string `json:"sensitivity_level,omitempty"` + Categories []RulesetRuleActionParametersCategories `json:"categories,omitempty"` + Rules []RulesetRuleActionParametersRules `json:"rules,omitempty"` +} + +type RulesetRuleActionParametersCategories struct { + Category string `json:"category"` + Action string `json:"action,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +type RulesetRuleActionParametersRules struct { + ID string `json:"id"` + Action string `json:"action,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + ScoreThreshold int `json:"score_threshold,omitempty"` + SensitivityLevel string `json:"sensitivity_level,omitempty"` +} + +// RulesetRuleActionParametersMatchedData holds the structure for WAF based +// payload logging. +type RulesetRuleActionParametersMatchedData struct { + PublicKey string `json:"public_key,omitempty"` +} + +// RulesetRuleActionParametersOrigin is the definition for route action +// parameters that involve origin overrides. +type RulesetRuleActionParametersOrigin struct { + Host string `json:"host,omitempty"` + Port uint16 `json:"port,omitempty"` +} + +// RulesetRuleActionParametersSni is the definition for the route action +// parameters that involve SNI overrides. +type RulesetRuleActionParametersSni struct { + Value string `json:"value"` +} + +// RulesetRuleActionParametersCompressionAlgorithm defines a compression +// algorithm for the compress_response action. 
+type RulesetRuleActionParametersCompressionAlgorithm struct { + Name string `json:"name"` +} + +type Polish int + +const ( + _ Polish = iota + PolishOff + PolishLossless + PolishLossy +) + +func (p Polish) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} + +func (p Polish) String() string { + return [...]string{"off", "lossless", "lossy"}[p-1] +} + +func (p *Polish) UnmarshalJSON(data []byte) error { + var ( + s string + err error + ) + err = json.Unmarshal(data, &s) + if err != nil { + return err + } + v, err := PolishFromString(s) + if err != nil { + return err + } + *p = *v + return nil +} + +func PolishFromString(s string) (*Polish, error) { + s = strings.ToLower(s) + var v Polish + switch s { + case "off": + v = PolishOff + case "lossless": + v = PolishLossless + case "lossy": + v = PolishLossy + default: + return nil, fmt.Errorf("unknown variant for polish: %s", s) + } + return &v, nil +} + +func (p Polish) IntoRef() *Polish { + return &p +} + +type SecurityLevel int + +const ( + _ SecurityLevel = iota + SecurityLevelOff + SecurityLevelEssentiallyOff + SecurityLevelLow + SecurityLevelMedium + SecurityLevelHigh + SecurityLevelHelp +) + +func (p SecurityLevel) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} + +func (p SecurityLevel) String() string { + return [...]string{"off", "essentially_off", "low", "medium", "high", "under_attack"}[p-1] +} + +func (p *SecurityLevel) UnmarshalJSON(data []byte) error { + var ( + s string + err error + ) + err = json.Unmarshal(data, &s) + if err != nil { + return err + } + v, err := SecurityLevelFromString(s) + if err != nil { + return err + } + *p = *v + return nil +} + +func SecurityLevelFromString(s string) (*SecurityLevel, error) { + s = strings.ToLower(s) + var v SecurityLevel + switch s { + case "off": + v = SecurityLevelOff + case "essentially_off": + v = SecurityLevelEssentiallyOff + case "low": + v = SecurityLevelLow + case "medium": + v = SecurityLevelMedium + case "high": + v = SecurityLevelHigh + case "under_attack": + v = SecurityLevelHelp + default: + return nil, fmt.Errorf("unknown variant for security_level: %s", s) + } + return &v, nil +} + +func (p SecurityLevel) IntoRef() *SecurityLevel { + return &p +} + +type SSL int + +const ( + _ SSL = iota + SSLOff + SSLFlexible + SSLFull + SSLStrict + SSLOriginPull +) + +func (p SSL) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} + +func (p SSL) String() string { + return [...]string{"off", "flexible", "full", "strict", "origin_pull"}[p-1] +} + +func (p *SSL) UnmarshalJSON(data []byte) error { + var ( + s string + err error + ) + err = json.Unmarshal(data, &s) + if err != nil { + return err + } + v, err := SSLFromString(s) + if err != nil { + return err + } + *p = *v + return nil +} + +func SSLFromString(s string) (*SSL, error) { + s = strings.ToLower(s) + var v SSL + switch s { + case "off": + v = SSLOff + case "flexible": + v = SSLFlexible + case "full": + v = SSLFull + case "strict": + v = SSLStrict + case "origin_pull": + v = SSLOriginPull + default: + return nil, fmt.Errorf("unknown variant for ssl: %s", s) + } + return &v, nil +} + +func (p SSL) IntoRef() *SSL { + return &p +} + +// RulesetRule contains information about a single Ruleset Rule. 
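+//
+// A minimal sketch of a custom block rule (the expression and description are
+// placeholders):
+//
+//	rule := RulesetRule{
+//		Action:      string(RulesetRuleActionBlock),
+//		Expression:  `(http.host eq "example.com")`,
+//		Description: "block traffic to example.com",
+//	}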
+type RulesetRule struct { + ID string `json:"id,omitempty"` + Version *string `json:"version,omitempty"` + Action string `json:"action"` + ActionParameters *RulesetRuleActionParameters `json:"action_parameters,omitempty"` + Expression string `json:"expression"` + Description string `json:"description,omitempty"` + LastUpdated *time.Time `json:"last_updated,omitempty"` + Ref string `json:"ref,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + ScoreThreshold int `json:"score_threshold,omitempty"` + RateLimit *RulesetRuleRateLimit `json:"ratelimit,omitempty"` + ExposedCredentialCheck *RulesetRuleExposedCredentialCheck `json:"exposed_credential_check,omitempty"` + Logging *RulesetRuleLogging `json:"logging,omitempty"` +} + +// RulesetRuleRateLimit contains the structure of a HTTP rate limit Ruleset Rule. +type RulesetRuleRateLimit struct { + Characteristics []string `json:"characteristics,omitempty"` + RequestsPerPeriod int `json:"requests_per_period,omitempty"` + ScorePerPeriod int `json:"score_per_period,omitempty"` + ScoreResponseHeaderName string `json:"score_response_header_name,omitempty"` + Period int `json:"period,omitempty"` + MitigationTimeout int `json:"mitigation_timeout,omitempty"` + CountingExpression string `json:"counting_expression,omitempty"` + RequestsToOrigin bool `json:"requests_to_origin,omitempty"` +} + +// RulesetRuleExposedCredentialCheck contains the structure of an exposed +// credential check Ruleset Rule. +type RulesetRuleExposedCredentialCheck struct { + UsernameExpression string `json:"username_expression,omitempty"` + PasswordExpression string `json:"password_expression,omitempty"` +} + +// RulesetRuleLogging contains the logging configuration for the rule. +type RulesetRuleLogging struct { + Enabled *bool `json:"enabled,omitempty"` +} + +// UpdateRulesetRequest is the representation of a Ruleset update. +type UpdateRulesetRequest struct { + Description string `json:"description"` + Rules []RulesetRule `json:"rules"` +} + +// ListRulesetResponse contains all Rulesets. +type ListRulesetResponse struct { + Response + Result []Ruleset `json:"result"` +} + +// GetRulesetResponse contains a single Ruleset. +type GetRulesetResponse struct { + Response + Result Ruleset `json:"result"` +} + +// CreateRulesetResponse contains response data when creating a new Ruleset. +type CreateRulesetResponse struct { + Response + Result Ruleset `json:"result"` +} + +// UpdateRulesetResponse contains response data when updating an existing +// Ruleset. +type UpdateRulesetResponse struct { + Response + Result Ruleset `json:"result"` +} + +type ListRulesetsParams struct{} + +type CreateRulesetParams struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Kind string `json:"kind,omitempty"` + Phase string `json:"phase,omitempty"` + Rules []RulesetRule `json:"rules"` +} + +type UpdateRulesetParams struct { + ID string `json:"-"` + Description string `json:"description"` + Rules []RulesetRule `json:"rules"` +} + +type UpdateEntrypointRulesetParams struct { + Phase string `json:"-"` + Description string `json:"description,omitempty"` + Rules []RulesetRule `json:"rules"` +} + +type DeleteRulesetRuleParams struct { + RulesetID string `json:"-"` + RulesetRuleID string `json:"-"` +} + +// ListRulesets lists all Rulesets in a given zone or account depending on the +// ResourceContainer type provided. 
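+//
+// A usage sketch (assumes an initialized *API client; the zone ID is a
+// placeholder):
+//
+//	rulesets, err := api.ListRulesets(context.Background(), ZoneIdentifier("example-zone-id"), ListRulesetsParams{})
+//	if err != nil {
+//		// handle the error
+//	}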
+// +// API reference: https://developers.cloudflare.com/api/operations/listAccountRulesets +// API reference: https://developers.cloudflare.com/api/operations/listZoneRulesets +func (api *API) ListRulesets(ctx context.Context, rc *ResourceContainer, params ListRulesetsParams) ([]Ruleset, error) { + uri := fmt.Sprintf("/%s/%s/rulesets", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Ruleset{}, err + } + + result := ListRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []Ruleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// GetRuleset fetches a single ruleset. +// +// API reference: https://developers.cloudflare.com/api/operations/getAccountRuleset +// API reference: https://developers.cloudflare.com/api/operations/getZoneRuleset +func (api *API) GetRuleset(ctx context.Context, rc *ResourceContainer, rulesetID string) (Ruleset, error) { + uri := fmt.Sprintf("/%s/%s/rulesets/%s", rc.Level, rc.Identifier, rulesetID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Ruleset{}, err + } + + result := GetRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return Ruleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// CreateRuleset creates a new ruleset. +// +// API reference: https://developers.cloudflare.com/api/operations/createAccountRuleset +// API reference: https://developers.cloudflare.com/api/operations/createZoneRuleset +func (api *API) CreateRuleset(ctx context.Context, rc *ResourceContainer, params CreateRulesetParams) (Ruleset, error) { + uri := fmt.Sprintf("/%s/%s/rulesets", rc.Level, rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return Ruleset{}, err + } + + result := CreateRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return Ruleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteRuleset removes a ruleset based on the ruleset ID. +// +// API reference: https://developers.cloudflare.com/api/operations/deleteAccountRuleset +// API reference: https://developers.cloudflare.com/api/operations/deleteZoneRuleset +func (api *API) DeleteRuleset(ctx context.Context, rc *ResourceContainer, rulesetID string) error { + uri := fmt.Sprintf("/%s/%s/rulesets/%s", rc.Level, rc.Identifier, rulesetID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + // The API is not implementing the standard response blob but returns an + // empty response (204) in case of a success. So we are checking for the + // response body size here. + if len(res) > 0 { + return fmt.Errorf(errMakeRequestError+": %w", errors.New(string(res))) + } + + return nil +} + +// UpdateRuleset updates a ruleset based on the ruleset ID. 
+//
+// API reference: https://developers.cloudflare.com/api/operations/updateAccountRuleset
+// API reference: https://developers.cloudflare.com/api/operations/updateZoneRuleset
+func (api *API) UpdateRuleset(ctx context.Context, rc *ResourceContainer, params UpdateRulesetParams) (Ruleset, error) {
+	if params.ID == "" {
+		return Ruleset{}, ErrMissingResourceIdentifier
+	}
+
+	uri := fmt.Sprintf("/%s/%s/rulesets/%s", rc.Level, rc.Identifier, params.ID)
+	res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params)
+	if err != nil {
+		return Ruleset{}, err
+	}
+
+	result := UpdateRulesetResponse{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return Ruleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return result.Result, nil
+}
+
+// DeleteRulesetRule removes a ruleset rule based on the ruleset ID and the
+// ruleset rule ID.
+//
+// API reference: https://developers.cloudflare.com/api/operations/deleteZoneRulesetRule
+func (api *API) DeleteRulesetRule(ctx context.Context, rc *ResourceContainer, params DeleteRulesetRuleParams) error {
+	uri := fmt.Sprintf("/%s/%s/rulesets/%s/rules/%s", rc.Level, rc.Identifier, params.RulesetID, params.RulesetRuleID)
+	res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil)
+	if err != nil {
+		return err
+	}
+
+	// The API is not implementing the standard response blob but returns an
+	// empty response (204) in case of a success. So we are checking for the
+	// response body size here.
+	if len(res) > 0 {
+		return fmt.Errorf(errMakeRequestError+": %w", errors.New(string(res)))
+	}
+
+	return nil
+}
+
+// GetEntrypointRuleset returns an entry point ruleset based on the phase.
+//
+// API reference: https://developers.cloudflare.com/api/operations/getAccountEntrypointRuleset
+// API reference: https://developers.cloudflare.com/api/operations/getZoneEntrypointRuleset
+func (api *API) GetEntrypointRuleset(ctx context.Context, rc *ResourceContainer, phase string) (Ruleset, error) {
+	uri := fmt.Sprintf("/%s/%s/rulesets/phases/%s/entrypoint", rc.Level, rc.Identifier, phase)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return Ruleset{}, err
+	}
+
+	result := GetRulesetResponse{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return Ruleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return result.Result, nil
+}
+
+// UpdateEntrypointRuleset updates an entry point ruleset for the given phase.
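+//
+// A usage sketch (assumes an initialized *API client; the zone ID, expression,
+// and description are placeholders):
+//
+//	_, err := api.UpdateEntrypointRuleset(context.Background(), ZoneIdentifier("example-zone-id"), UpdateEntrypointRulesetParams{
+//		Phase:       string(RulesetPhaseHTTPRequestFirewallCustom),
+//		Description: "zone-level custom firewall rules",
+//		Rules: []RulesetRule{{
+//			Action:     string(RulesetRuleActionChallenge),
+//			Expression: `(ip.geoip.country eq "T1")`,
+//		}},
+//	})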
+// +// API reference: https://developers.cloudflare.com/api/operations/updateAccountEntrypointRuleset +// API reference: https://developers.cloudflare.com/api/operations/updateZoneEntrypointRuleset +func (api *API) UpdateEntrypointRuleset(ctx context.Context, rc *ResourceContainer, params UpdateEntrypointRulesetParams) (Ruleset, error) { + if params.Phase == "" { + return Ruleset{}, ErrMissingRulesetPhase + } + + uri := fmt.Sprintf("/%s/%s/rulesets/phases/%s/entrypoint", rc.Level, rc.Identifier, params.Phase) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return Ruleset{}, err + } + + result := GetRulesetResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return Ruleset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_primaries.go b/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_primaries.go new file mode 100644 index 0000000..427a41a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_primaries.go @@ -0,0 +1,165 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +const ( + errSecondaryDNSInvalidPrimaryID = "secondary DNS primary ID is required" + errSecondaryDNSInvalidPrimaryIP = "secondary DNS primary IP invalid" + errSecondaryDNSInvalidPrimaryPort = "secondary DNS primary port invalid" +) + +// SecondaryDNSPrimary is the representation of the DNS Primary. +type SecondaryDNSPrimary struct { + ID string `json:"id,omitempty"` + IP string `json:"ip"` + Port int `json:"port"` + IxfrEnable bool `json:"ixfr_enable"` + TsigID string `json:"tsig_id"` + Name string `json:"name"` +} + +// SecondaryDNSPrimaryDetailResponse is the API representation of a single +// secondary DNS primary response. +type SecondaryDNSPrimaryDetailResponse struct { + Response + Result SecondaryDNSPrimary `json:"result"` +} + +// SecondaryDNSPrimaryListResponse is the API representation of all secondary DNS +// primaries. +type SecondaryDNSPrimaryListResponse struct { + Response + Result []SecondaryDNSPrimary `json:"result"` +} + +// GetSecondaryDNSPrimary returns a single secondary DNS primary. +// +// API reference: https://api.cloudflare.com/#secondary-dns-primary--primary-details +func (api *API) GetSecondaryDNSPrimary(ctx context.Context, accountID, primaryID string) (SecondaryDNSPrimary, error) { + uri := fmt.Sprintf("/accounts/%s/secondary_dns/primaries/%s", accountID, primaryID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return SecondaryDNSPrimary{}, err + } + + var r SecondaryDNSPrimaryDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return SecondaryDNSPrimary{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListSecondaryDNSPrimaries returns all secondary DNS primaries for an account. 
+// +// API reference: https://api.cloudflare.com/#secondary-dns-primary--list-primaries +func (api *API) ListSecondaryDNSPrimaries(ctx context.Context, accountID string) ([]SecondaryDNSPrimary, error) { + uri := fmt.Sprintf("/accounts/%s/secondary_dns/primaries", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []SecondaryDNSPrimary{}, err + } + + var r SecondaryDNSPrimaryListResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []SecondaryDNSPrimary{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateSecondaryDNSPrimary creates a secondary DNS primary. +// +// API reference: https://api.cloudflare.com/#secondary-dns-primary--create-primary +func (api *API) CreateSecondaryDNSPrimary(ctx context.Context, accountID string, primary SecondaryDNSPrimary) (SecondaryDNSPrimary, error) { + if err := validateRequiredSecondaryDNSPrimaries(primary); err != nil { + return SecondaryDNSPrimary{}, err + } + + uri := fmt.Sprintf("/accounts/%s/secondary_dns/primaries", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, SecondaryDNSPrimary{ + IP: primary.IP, + Port: primary.Port, + IxfrEnable: primary.IxfrEnable, + TsigID: primary.TsigID, + Name: primary.Name, + }) + if err != nil { + return SecondaryDNSPrimary{}, err + } + + var r SecondaryDNSPrimaryDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return SecondaryDNSPrimary{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateSecondaryDNSPrimary creates a secondary DNS primary. +// +// API reference: https://api.cloudflare.com/#secondary-dns-primary--update-primary +func (api *API) UpdateSecondaryDNSPrimary(ctx context.Context, accountID string, primary SecondaryDNSPrimary) (SecondaryDNSPrimary, error) { + if primary.ID == "" { + return SecondaryDNSPrimary{}, errors.New(errSecondaryDNSInvalidPrimaryID) + } + + if err := validateRequiredSecondaryDNSPrimaries(primary); err != nil { + return SecondaryDNSPrimary{}, err + } + + uri := fmt.Sprintf("/accounts/%s/secondary_dns/primaries/%s", accountID, primary.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, SecondaryDNSPrimary{ + IP: primary.IP, + Port: primary.Port, + IxfrEnable: primary.IxfrEnable, + TsigID: primary.TsigID, + Name: primary.Name, + }) + if err != nil { + return SecondaryDNSPrimary{}, err + } + + var r SecondaryDNSPrimaryDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return SecondaryDNSPrimary{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteSecondaryDNSPrimary deletes a secondary DNS primary. 
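+//
+// A usage sketch (assumes an initialized *API client; the account and primary
+// IDs are placeholders):
+//
+//	err := api.DeleteSecondaryDNSPrimary(context.Background(), "example-account-id", "example-primary-id")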
+// +// API reference: https://api.cloudflare.com/#secondary-dns-primary--delete-primary +func (api *API) DeleteSecondaryDNSPrimary(ctx context.Context, accountID, primaryID string) error { + uri := fmt.Sprintf("/zones/%s/secondary_dns/primaries/%s", accountID, primaryID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return err + } + + return nil +} + +func validateRequiredSecondaryDNSPrimaries(p SecondaryDNSPrimary) error { + if p.IP == "" { + return errors.New(errSecondaryDNSInvalidPrimaryIP) + } + + if p.Port == 0 { + return errors.New(errSecondaryDNSInvalidPrimaryPort) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_tsig.go b/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_tsig.go new file mode 100644 index 0000000..07b76bc --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_tsig.go @@ -0,0 +1,132 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +const ( + errSecondaryDNSTSIGMissingID = "secondary DNS TSIG ID is required" +) + +// SecondaryDNSTSIG contains the structure for a secondary DNS TSIG. +type SecondaryDNSTSIG struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Secret string `json:"secret"` + Algo string `json:"algo"` +} + +// SecondaryDNSTSIGDetailResponse is the API response for a single secondary +// DNS TSIG. +type SecondaryDNSTSIGDetailResponse struct { + Response + Result SecondaryDNSTSIG `json:"result"` +} + +// SecondaryDNSTSIGListResponse is the API response for all secondary DNS TSIGs. +type SecondaryDNSTSIGListResponse struct { + Response + Result []SecondaryDNSTSIG `json:"result"` +} + +// GetSecondaryDNSTSIG gets a single account level TSIG for a secondary DNS +// configuration. +// +// API reference: https://api.cloudflare.com/#secondary-dns-tsig--tsig-details +func (api *API) GetSecondaryDNSTSIG(ctx context.Context, accountID, tsigID string) (SecondaryDNSTSIG, error) { + uri := fmt.Sprintf("/accounts/%s/secondary_dns/tsigs/%s", accountID, tsigID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return SecondaryDNSTSIG{}, err + } + + var r SecondaryDNSTSIGDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return SecondaryDNSTSIG{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListSecondaryDNSTSIGs gets all account level TSIG for a secondary DNS +// configuration. +// +// API reference: https://api.cloudflare.com/#secondary-dns-tsig--list-tsigs +func (api *API) ListSecondaryDNSTSIGs(ctx context.Context, accountID string) ([]SecondaryDNSTSIG, error) { + uri := fmt.Sprintf("/accounts/%s/secondary_dns/tsigs", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []SecondaryDNSTSIG{}, err + } + + var r SecondaryDNSTSIGListResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []SecondaryDNSTSIG{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateSecondaryDNSTSIG creates a secondary DNS TSIG at the account level. 
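+//
+// A usage sketch (assumes an initialized *API client; all values are
+// placeholders):
+//
+//	tsig, err := api.CreateSecondaryDNSTSIG(context.Background(), "example-account-id", SecondaryDNSTSIG{
+//		Name:   "example-tsig",
+//		Secret: "example-secret",
+//		Algo:   "hmac-sha512.",
+//	})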
+// +// API reference: https://api.cloudflare.com/#secondary-dns-tsig--create-tsig +func (api *API) CreateSecondaryDNSTSIG(ctx context.Context, accountID string, tsig SecondaryDNSTSIG) (SecondaryDNSTSIG, error) { + uri := fmt.Sprintf("/accounts/%s/secondary_dns/tsigs", accountID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, tsig) + + if err != nil { + return SecondaryDNSTSIG{}, err + } + + result := SecondaryDNSTSIGDetailResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return SecondaryDNSTSIG{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdateSecondaryDNSTSIG updates an existing secondary DNS TSIG at +// the account level. +// +// API reference: https://api.cloudflare.com/#secondary-dns-tsig--update-tsig +func (api *API) UpdateSecondaryDNSTSIG(ctx context.Context, accountID string, tsig SecondaryDNSTSIG) (SecondaryDNSTSIG, error) { + if tsig.ID == "" { + return SecondaryDNSTSIG{}, errors.New(errSecondaryDNSTSIGMissingID) + } + + uri := fmt.Sprintf("/accounts/%s/secondary_dns/tsigs/%s", accountID, tsig.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, tsig) + + if err != nil { + return SecondaryDNSTSIG{}, err + } + + result := SecondaryDNSTSIGDetailResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return SecondaryDNSTSIG{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteSecondaryDNSTSIG deletes a secondary DNS TSIG. +// +// API reference: https://api.cloudflare.com/#secondary-dns-tsig--delete-tsig +func (api *API) DeleteSecondaryDNSTSIG(ctx context.Context, accountID, tsigID string) error { + uri := fmt.Sprintf("/accounts/%s/secondary_dns/tsigs/%s", accountID, tsigID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_zone.go b/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_zone.go new file mode 100644 index 0000000..2371366 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/secondary_dns_zone.go @@ -0,0 +1,172 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +const ( + errSecondaryDNSInvalidAutoRefreshValue = "secondary DNS auto refresh value is invalid" + errSecondaryDNSInvalidZoneName = "secondary DNS zone name is invalid" + errSecondaryDNSInvalidPrimaries = "secondary DNS primaries value is invalid" +) + +// SecondaryDNSZone contains the high level structure of a secondary DNS zone. +type SecondaryDNSZone struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Primaries []string `json:"primaries,omitempty"` + AutoRefreshSeconds int `json:"auto_refresh_seconds,omitempty"` + SoaSerial int `json:"soa_serial,omitempty"` + CreatedTime time.Time `json:"created_time,omitempty"` + CheckedTime time.Time `json:"checked_time,omitempty"` + ModifiedTime time.Time `json:"modified_time,omitempty"` +} + +// SecondaryDNSZoneDetailResponse is the API response for a single secondary +// DNS zone. +type SecondaryDNSZoneDetailResponse struct { + Response + Result SecondaryDNSZone `json:"result"` +} + +// SecondaryDNSZoneAXFRResponse is the API response for a single secondary +// DNS AXFR response. +type SecondaryDNSZoneAXFRResponse struct { + Response + Result string `json:"result"` +} + +// GetSecondaryDNSZone returns the secondary DNS zone configuration for a +// single zone. 
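+//
+// A usage sketch (assumes an initialized *API client; the zone ID is a
+// placeholder):
+//
+//	zone, err := api.GetSecondaryDNSZone(context.Background(), "example-zone-id")
+//	if err != nil {
+//		// handle the error
+//	}
+//	// zone.Primaries and zone.AutoRefreshSeconds describe the transfer setup.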
+// +// API reference: https://api.cloudflare.com/#secondary-dns-secondary-zone-configuration-details +func (api *API) GetSecondaryDNSZone(ctx context.Context, zoneID string) (SecondaryDNSZone, error) { + uri := fmt.Sprintf("/zones/%s/secondary_dns", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return SecondaryDNSZone{}, err + } + + var r SecondaryDNSZoneDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return SecondaryDNSZone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateSecondaryDNSZone creates a secondary DNS zone. +// +// API reference: https://api.cloudflare.com/#secondary-dns-create-secondary-zone-configuration +func (api *API) CreateSecondaryDNSZone(ctx context.Context, zoneID string, zone SecondaryDNSZone) (SecondaryDNSZone, error) { + if err := validateRequiredSecondaryDNSZoneValues(zone); err != nil { + return SecondaryDNSZone{}, err + } + + uri := fmt.Sprintf("/zones/%s/secondary_dns", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, + SecondaryDNSZone{ + Name: zone.Name, + AutoRefreshSeconds: zone.AutoRefreshSeconds, + Primaries: zone.Primaries, + }, + ) + + if err != nil { + return SecondaryDNSZone{}, err + } + + result := SecondaryDNSZoneDetailResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return SecondaryDNSZone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// UpdateSecondaryDNSZone updates an existing secondary DNS zone. +// +// API reference: https://api.cloudflare.com/#secondary-dns-update-secondary-zone-configuration +func (api *API) UpdateSecondaryDNSZone(ctx context.Context, zoneID string, zone SecondaryDNSZone) (SecondaryDNSZone, error) { + if err := validateRequiredSecondaryDNSZoneValues(zone); err != nil { + return SecondaryDNSZone{}, err + } + + uri := fmt.Sprintf("/zones/%s/secondary_dns", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, + SecondaryDNSZone{ + Name: zone.Name, + AutoRefreshSeconds: zone.AutoRefreshSeconds, + Primaries: zone.Primaries, + }, + ) + + if err != nil { + return SecondaryDNSZone{}, err + } + + result := SecondaryDNSZoneDetailResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return SecondaryDNSZone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result, nil +} + +// DeleteSecondaryDNSZone deletes a secondary DNS zone. +// +// API reference: https://api.cloudflare.com/#secondary-dns-delete-secondary-zone-configuration +func (api *API) DeleteSecondaryDNSZone(ctx context.Context, zoneID string) error { + uri := fmt.Sprintf("/zones/%s/secondary_dns", zoneID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return err + } + + return nil +} + +// ForceSecondaryDNSZoneAXFR requests an immediate AXFR request. 
+// +// API reference: https://api.cloudflare.com/#secondary-dns-update-secondary-zone-configuration +func (api *API) ForceSecondaryDNSZoneAXFR(ctx context.Context, zoneID string) error { + uri := fmt.Sprintf("/zones/%s/secondary_dns/force_axfr", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + + if err != nil { + return err + } + + result := SecondaryDNSZoneAXFRResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// validateRequiredSecondaryDNSZoneValues ensures that the payload matches the +// API requirements for required fields. +func validateRequiredSecondaryDNSZoneValues(zone SecondaryDNSZone) error { + if zone.Name == "" { + return errors.New(errSecondaryDNSInvalidZoneName) + } + + if zone.AutoRefreshSeconds == 0 || zone.AutoRefreshSeconds < 0 { + return errors.New(errSecondaryDNSInvalidAutoRefreshValue) + } + + if len(zone.Primaries) == 0 { + return errors.New(errSecondaryDNSInvalidPrimaries) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/spectrum.go b/vendor/github.com/cloudflare/cloudflare-go/spectrum.go new file mode 100644 index 0000000..2ebfb41 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/spectrum.go @@ -0,0 +1,368 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/goccy/go-json" +) + +// ProxyProtocol implements json.Unmarshaler in order to support deserializing of the deprecated boolean +// value for `proxy_protocol`. +type ProxyProtocol string + +// UnmarshalJSON handles deserializing of both the deprecated boolean value and the current string value +// for the `proxy_protocol` field. +func (p *ProxyProtocol) UnmarshalJSON(data []byte) error { + var raw interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + switch pp := raw.(type) { + case string: + *p = ProxyProtocol(pp) + case bool: + if pp { + *p = "v1" + } else { + *p = "off" + } + default: + return fmt.Errorf("invalid type for proxy_protocol field: %T", pp) + } + return nil +} + +// SpectrumApplicationOriginPort defines a union of a single port or range of ports. +type SpectrumApplicationOriginPort struct { + Port, Start, End uint16 +} + +// ErrOriginPortInvalid is a common error for failing to parse a single port or port range. +var ErrOriginPortInvalid = errors.New("invalid origin port") + +func (p *SpectrumApplicationOriginPort) parse(s string) error { + switch split := strings.Split(s, "-"); len(split) { + case 1: + i, err := strconv.ParseUint(split[0], 10, 16) + if err != nil { + return err + } + p.Port = uint16(i) + case 2: + start, err := strconv.ParseUint(split[0], 10, 16) + if err != nil { + return err + } + end, err := strconv.ParseUint(split[1], 10, 16) + if err != nil { + return err + } + if start >= end { + return ErrOriginPortInvalid + } + p.Start = uint16(start) + p.End = uint16(end) + default: + return ErrOriginPortInvalid + } + return nil +} + +// UnmarshalJSON converts a byte slice into a single port or port range. +func (p *SpectrumApplicationOriginPort) UnmarshalJSON(b []byte) error { + var port interface{} + if err := json.Unmarshal(b, &port); err != nil { + return err + } + + switch i := port.(type) { + case float64: + p.Port = uint16(i) + case string: + if err := p.parse(i); err != nil { + return err + } + } + + return nil +} + +// MarshalJSON converts a single port or port range to a suitable byte slice. 
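+//
+// For example (a sketch):
+//
+//	single, _ := json.Marshal(&SpectrumApplicationOriginPort{Port: 3000})
+//	// single == []byte(`3000`)
+//	ranged, _ := json.Marshal(&SpectrumApplicationOriginPort{Start: 1000, End: 2000})
+//	// ranged == []byte(`"1000-2000"`)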
+func (p *SpectrumApplicationOriginPort) MarshalJSON() ([]byte, error) { + if p.End > 0 { + return json.Marshal(fmt.Sprintf("%d-%d", p.Start, p.End)) + } + return json.Marshal(p.Port) +} + +// SpectrumApplication defines a single Spectrum Application. +type SpectrumApplication struct { + DNS SpectrumApplicationDNS `json:"dns,omitempty"` + OriginDirect []string `json:"origin_direct,omitempty"` + ID string `json:"id,omitempty"` + Protocol string `json:"protocol,omitempty"` + TrafficType string `json:"traffic_type,omitempty"` + TLS string `json:"tls,omitempty"` + ProxyProtocol ProxyProtocol `json:"proxy_protocol,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + OriginDNS *SpectrumApplicationOriginDNS `json:"origin_dns,omitempty"` + OriginPort *SpectrumApplicationOriginPort `json:"origin_port,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + EdgeIPs *SpectrumApplicationEdgeIPs `json:"edge_ips,omitempty"` + ArgoSmartRouting bool `json:"argo_smart_routing,omitempty"` + IPv4 bool `json:"ipv4,omitempty"` + IPFirewall bool `json:"ip_firewall,omitempty"` +} + +// UnmarshalJSON handles setting the `ProxyProtocol` field based on the value of the deprecated `spp` field. +func (a *SpectrumApplication) UnmarshalJSON(data []byte) error { + var body map[string]interface{} + if err := json.Unmarshal(data, &body); err != nil { + return err + } + + var app spectrumApplicationRaw + if err := json.Unmarshal(data, &app); err != nil { + return err + } + + if spp, ok := body["spp"]; ok && spp.(bool) { + app.ProxyProtocol = "simple" + } + + *a = SpectrumApplication(app) + return nil +} + +// spectrumApplicationRaw is used to inspect an application body to support the deprecated boolean value for `spp`. +type spectrumApplicationRaw SpectrumApplication + +// SpectrumApplicationDNS holds the external DNS configuration for a Spectrum +// Application. +type SpectrumApplicationDNS struct { + Type string `json:"type"` + Name string `json:"name"` +} + +// SpectrumApplicationOriginDNS holds the origin DNS configuration for a Spectrum +// Application. +type SpectrumApplicationOriginDNS struct { + Name string `json:"name"` +} + +// SpectrumApplicationDetailResponse is the structure of the detailed response +// from the API. +type SpectrumApplicationDetailResponse struct { + Response + Result SpectrumApplication `json:"result"` +} + +// SpectrumApplicationsDetailResponse is the structure of the detailed response +// from the API. +type SpectrumApplicationsDetailResponse struct { + Response + Result []SpectrumApplication `json:"result"` +} + +// SpectrumApplicationEdgeIPs represents configuration for Bring-Your-Own-IP +// https://developers.cloudflare.com/spectrum/getting-started/byoip/ +type SpectrumApplicationEdgeIPs struct { + Type SpectrumApplicationEdgeType `json:"type"` + Connectivity *SpectrumApplicationConnectivity `json:"connectivity,omitempty"` + IPs []net.IP `json:"ips,omitempty"` +} + +// SpectrumApplicationEdgeType for possible Edge configurations. +type SpectrumApplicationEdgeType string + +const ( + // SpectrumEdgeTypeDynamic IP config. + SpectrumEdgeTypeDynamic SpectrumApplicationEdgeType = "dynamic" + // SpectrumEdgeTypeStatic IP config. + SpectrumEdgeTypeStatic SpectrumApplicationEdgeType = "static" +) + +// UnmarshalJSON function for SpectrumApplicationEdgeType enum. 
+func (t *SpectrumApplicationEdgeType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + newEdgeType := SpectrumApplicationEdgeType(strings.ToLower(s)) + switch newEdgeType { + case SpectrumEdgeTypeDynamic, SpectrumEdgeTypeStatic: + *t = newEdgeType + return nil + } + + return errors.New(errUnmarshalError) +} + +func (t SpectrumApplicationEdgeType) String() string { + return string(t) +} + +// SpectrumApplicationConnectivity specifies IP address type on the edge configuration. +type SpectrumApplicationConnectivity string + +const ( + // SpectrumConnectivityAll specifies IPv4/6 edge IP. + SpectrumConnectivityAll SpectrumApplicationConnectivity = "all" + // SpectrumConnectivityIPv4 specifies IPv4 edge IP. + SpectrumConnectivityIPv4 SpectrumApplicationConnectivity = "ipv4" + // SpectrumConnectivityIPv6 specifies IPv6 edge IP. + SpectrumConnectivityIPv6 SpectrumApplicationConnectivity = "ipv6" + // SpectrumConnectivityStatic specifies static edge IP configuration. + SpectrumConnectivityStatic SpectrumApplicationConnectivity = "static" +) + +func (c SpectrumApplicationConnectivity) String() string { + return string(c) +} + +// UnmarshalJSON function for SpectrumApplicationConnectivity enum. +func (c *SpectrumApplicationConnectivity) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + newConnectivity := SpectrumApplicationConnectivity(strings.ToLower(s)) + if newConnectivity.Dynamic() { + *c = newConnectivity + return nil + } + + return errors.New(errUnmarshalError) +} + +// Dynamic checks if address family is specified as dynamic config. +func (c SpectrumApplicationConnectivity) Dynamic() bool { + switch c { + case SpectrumConnectivityAll, SpectrumConnectivityIPv4, SpectrumConnectivityIPv6: + return true + } + return false +} + +// Static checks if address family is specified as static config. +func (c SpectrumApplicationConnectivity) Static() bool { + return c == SpectrumConnectivityStatic +} + +// SpectrumApplications fetches all of the Spectrum applications for a zone. +// +// API reference: https://developers.cloudflare.com/spectrum/api-reference/#list-spectrum-applications +func (api *API) SpectrumApplications(ctx context.Context, zoneID string) ([]SpectrumApplication, error) { + uri := fmt.Sprintf("/zones/%s/spectrum/apps", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []SpectrumApplication{}, err + } + + var spectrumApplications SpectrumApplicationsDetailResponse + err = json.Unmarshal(res, &spectrumApplications) + if err != nil { + return []SpectrumApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return spectrumApplications.Result, nil +} + +// SpectrumApplication fetches a single Spectrum application based on the ID. 
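
// Illustrative usage sketch (not from the vendored source): listing the Spectrum
// applications configured on a zone via SpectrumApplications above. Assumes an
// already-configured *cloudflare.API client and a placeholder zone ID.

package example

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// PrintSpectrumApps prints the ID, protocol and edge DNS name of every
// Spectrum application on the given zone.
func PrintSpectrumApps(api *cloudflare.API, zoneID string) error {
	apps, err := api.SpectrumApplications(context.Background(), zoneID)
	if err != nil {
		return err
	}
	for _, app := range apps {
		fmt.Printf("%s %s -> %s\n", app.ID, app.Protocol, app.DNS.Name)
	}
	return nil
}
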
+// +// API reference: https://developers.cloudflare.com/spectrum/api-reference/#list-spectrum-applications +func (api *API) SpectrumApplication(ctx context.Context, zoneID string, applicationID string) (SpectrumApplication, error) { + uri := fmt.Sprintf( + "/zones/%s/spectrum/apps/%s", + zoneID, + applicationID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return SpectrumApplication{}, err + } + + var spectrumApplication SpectrumApplicationDetailResponse + err = json.Unmarshal(res, &spectrumApplication) + if err != nil { + return SpectrumApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return spectrumApplication.Result, nil +} + +// CreateSpectrumApplication creates a new Spectrum application. +// +// API reference: https://developers.cloudflare.com/spectrum/api-reference/#create-a-spectrum-application +func (api *API) CreateSpectrumApplication(ctx context.Context, zoneID string, appDetails SpectrumApplication) (SpectrumApplication, error) { + uri := fmt.Sprintf("/zones/%s/spectrum/apps", zoneID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, appDetails) + if err != nil { + return SpectrumApplication{}, err + } + + var spectrumApplication SpectrumApplicationDetailResponse + err = json.Unmarshal(res, &spectrumApplication) + if err != nil { + return SpectrumApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return spectrumApplication.Result, nil +} + +// UpdateSpectrumApplication updates an existing Spectrum application. +// +// API reference: https://developers.cloudflare.com/spectrum/api-reference/#update-a-spectrum-application +func (api *API) UpdateSpectrumApplication(ctx context.Context, zoneID, appID string, appDetails SpectrumApplication) (SpectrumApplication, error) { + uri := fmt.Sprintf( + "/zones/%s/spectrum/apps/%s", + zoneID, + appID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, appDetails) + if err != nil { + return SpectrumApplication{}, err + } + + var spectrumApplication SpectrumApplicationDetailResponse + err = json.Unmarshal(res, &spectrumApplication) + if err != nil { + return SpectrumApplication{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return spectrumApplication.Result, nil +} + +// DeleteSpectrumApplication removes a Spectrum application based on the ID. +// +// API reference: https://developers.cloudflare.com/spectrum/api-reference/#delete-a-spectrum-application +func (api *API) DeleteSpectrumApplication(ctx context.Context, zoneID string, applicationID string) error { + uri := fmt.Sprintf( + "/zones/%s/spectrum/apps/%s", + zoneID, + applicationID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/split_tunnel.go b/vendor/github.com/cloudflare/cloudflare-go/split_tunnel.go new file mode 100644 index 0000000..3124aa7 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/split_tunnel.go @@ -0,0 +1,107 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// SplitTunnelResponse represents the response from the get split +// tunnel endpoints. +type SplitTunnelResponse struct { + Response + Result []SplitTunnel `json:"result"` +} + +// SplitTunnel represents the individual tunnel struct. 
+type SplitTunnel struct { + Address string `json:"address,omitempty"` + Host string `json:"host,omitempty"` + Description string `json:"description,omitempty"` +} + +// ListSplitTunnel returns all include or exclude split tunnel within an account. +// +// API reference for include: https://api.cloudflare.com/#device-policy-get-split-tunnel-include-list +// API reference for exclude: https://api.cloudflare.com/#device-policy-get-split-tunnel-exclude-list +func (api *API) ListSplitTunnels(ctx context.Context, accountID string, mode string) ([]SplitTunnel, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s", AccountRouteRoot, accountID, mode) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []SplitTunnel{}, err + } + + var splitTunnelResponse SplitTunnelResponse + err = json.Unmarshal(res, &splitTunnelResponse) + if err != nil { + return []SplitTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return splitTunnelResponse.Result, nil +} + +// UpdateSplitTunnel updates the existing split tunnel policy. +// +// API reference for include: https://api.cloudflare.com/#device-policy-set-split-tunnel-include-list +// API reference for exclude: https://api.cloudflare.com/#device-policy-set-split-tunnel-exclude-list +func (api *API) UpdateSplitTunnel(ctx context.Context, accountID string, mode string, tunnels []SplitTunnel) ([]SplitTunnel, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s", AccountRouteRoot, accountID, mode) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, tunnels) + if err != nil { + return []SplitTunnel{}, err + } + + var splitTunnelResponse SplitTunnelResponse + err = json.Unmarshal(res, &splitTunnelResponse) + if err != nil { + return []SplitTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return splitTunnelResponse.Result, nil +} + +// ListSplitTunnelDeviceSettingsPolicy returns all include or exclude split tunnel within a device settings policy +// +// API reference for include: https://api.cloudflare.com/#device-policy-get-split-tunnel-include-list +// API reference for exclude: https://api.cloudflare.com/#device-policy-get-split-tunnel-exclude-list +func (api *API) ListSplitTunnelsDeviceSettingsPolicy(ctx context.Context, accountID, policyID string, mode string) ([]SplitTunnel, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s/%s", AccountRouteRoot, accountID, policyID, mode) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []SplitTunnel{}, err + } + + var splitTunnelResponse SplitTunnelResponse + err = json.Unmarshal(res, &splitTunnelResponse) + if err != nil { + return []SplitTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return splitTunnelResponse.Result, nil +} + +// UpdateSplitTunnelDeviceSettingsPolicy updates the existing split tunnel policy within a device settings policy +// +// API reference for include: https://api.cloudflare.com/#device-policy-set-split-tunnel-include-list +// API reference for exclude: https://api.cloudflare.com/#device-policy-set-split-tunnel-exclude-list +func (api *API) UpdateSplitTunnelDeviceSettingsPolicy(ctx context.Context, accountID, policyID string, mode string, tunnels []SplitTunnel) ([]SplitTunnel, error) { + uri := fmt.Sprintf("/%s/%s/devices/policy/%s/%s", AccountRouteRoot, accountID, policyID, mode) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, tunnels) + if err != nil { + return []SplitTunnel{}, err + } + + var splitTunnelResponse 
SplitTunnelResponse + err = json.Unmarshal(res, &splitTunnelResponse) + if err != nil { + return []SplitTunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return splitTunnelResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/ssl.go b/vendor/github.com/cloudflare/cloudflare-go/ssl.go new file mode 100644 index 0000000..3d1b5cf --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/ssl.go @@ -0,0 +1,173 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// ZoneCustomSSL represents custom SSL certificate metadata. +type ZoneCustomSSL struct { + ID string `json:"id"` + Hosts []string `json:"hosts"` + Issuer string `json:"issuer"` + Signature string `json:"signature"` + Status string `json:"status"` + BundleMethod string `json:"bundle_method"` + GeoRestrictions *ZoneCustomSSLGeoRestrictions `json:"geo_restrictions,omitempty"` + ZoneID string `json:"zone_id"` + UploadedOn time.Time `json:"uploaded_on"` + ModifiedOn time.Time `json:"modified_on"` + ExpiresOn time.Time `json:"expires_on"` + Priority int `json:"priority"` + KeylessServer KeylessSSL `json:"keyless_server"` +} + +// ZoneCustomSSLGeoRestrictions represents the parameter to create or update +// geographic restrictions on a custom ssl certificate. +type ZoneCustomSSLGeoRestrictions struct { + Label string `json:"label"` +} + +// zoneCustomSSLResponse represents the response from the zone SSL details endpoint. +type zoneCustomSSLResponse struct { + Response + Result ZoneCustomSSL `json:"result"` +} + +// zoneCustomSSLsResponse represents the response from the zone SSL list endpoint. +type zoneCustomSSLsResponse struct { + Response + Result []ZoneCustomSSL `json:"result"` +} + +// ZoneCustomSSLOptions represents the parameters to create or update an existing +// custom SSL configuration. +type ZoneCustomSSLOptions struct { + Certificate string `json:"certificate"` + PrivateKey string `json:"private_key"` + BundleMethod string `json:"bundle_method,omitempty"` + GeoRestrictions *ZoneCustomSSLGeoRestrictions `json:"geo_restrictions,omitempty"` + Type string `json:"type,omitempty"` +} + +// ZoneCustomSSLPriority represents a certificate's ID and priority. It is a +// subset of ZoneCustomSSL used for patch requests. +type ZoneCustomSSLPriority struct { + ID string `json:"ID"` + Priority int `json:"priority"` +} + +// CreateSSL allows you to add a custom SSL certificate to the given zone. +// +// API reference: https://api.cloudflare.com/#custom-ssl-for-a-zone-create-ssl-configuration +func (api *API) CreateSSL(ctx context.Context, zoneID string, options ZoneCustomSSLOptions) (ZoneCustomSSL, error) { + uri := fmt.Sprintf("/zones/%s/custom_certificates", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, options) + if err != nil { + return ZoneCustomSSL{}, err + } + var r zoneCustomSSLResponse + if err := json.Unmarshal(res, &r); err != nil { + return ZoneCustomSSL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListSSL lists the custom certificates for the given zone. 
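
// Illustrative usage sketch (not from the vendored source): uploading a custom
// certificate with CreateSSL as defined above. The PEM strings and zone ID are
// placeholders; assumes an already-configured *cloudflare.API client.

package example

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// UploadCustomCert uploads a certificate/key pair to the zone and reports the
// ID and status returned by the API.
func UploadCustomCert(api *cloudflare.API, zoneID, certPEM, keyPEM string) error {
	cert, err := api.CreateSSL(context.Background(), zoneID, cloudflare.ZoneCustomSSLOptions{
		Certificate:  certPEM,
		PrivateKey:   keyPEM,
		BundleMethod: "ubiquitous", // optional; the server default applies when omitted
	})
	if err != nil {
		return err
	}
	fmt.Printf("uploaded certificate %s (status %s)\n", cert.ID, cert.Status)
	return nil
}
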
+// +// API reference: https://api.cloudflare.com/#custom-ssl-for-a-zone-list-ssl-configurations +func (api *API) ListSSL(ctx context.Context, zoneID string) ([]ZoneCustomSSL, error) { + uri := fmt.Sprintf("/zones/%s/custom_certificates", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r zoneCustomSSLsResponse + if err := json.Unmarshal(res, &r); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// SSLDetails returns the configuration details for a custom SSL certificate. +// +// API reference: https://api.cloudflare.com/#custom-ssl-for-a-zone-ssl-configuration-details +func (api *API) SSLDetails(ctx context.Context, zoneID, certificateID string) (ZoneCustomSSL, error) { + uri := fmt.Sprintf("/zones/%s/custom_certificates/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZoneCustomSSL{}, err + } + var r zoneCustomSSLResponse + if err := json.Unmarshal(res, &r); err != nil { + return ZoneCustomSSL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateSSL updates (replaces) a custom SSL certificate. +// +// API reference: https://api.cloudflare.com/#custom-ssl-for-a-zone-update-ssl-configuration +func (api *API) UpdateSSL(ctx context.Context, zoneID, certificateID string, options ZoneCustomSSLOptions) (ZoneCustomSSL, error) { + uri := fmt.Sprintf("/zones/%s/custom_certificates/%s", zoneID, certificateID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, options) + if err != nil { + return ZoneCustomSSL{}, err + } + var r zoneCustomSSLResponse + if err := json.Unmarshal(res, &r); err != nil { + return ZoneCustomSSL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ReprioritizeSSL allows you to change the priority (which is served for a given +// request) of custom SSL certificates associated with the given zone. +// +// API reference: https://api.cloudflare.com/#custom-ssl-for-a-zone-re-prioritize-ssl-certificates +func (api *API) ReprioritizeSSL(ctx context.Context, zoneID string, p []ZoneCustomSSLPriority) ([]ZoneCustomSSL, error) { + uri := fmt.Sprintf("/zones/%s/custom_certificates/prioritize", zoneID) + params := struct { + Certificates []ZoneCustomSSLPriority `json:"certificates"` + }{ + Certificates: p, + } + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return nil, err + } + var r zoneCustomSSLsResponse + if err := json.Unmarshal(res, &r); err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteSSL deletes a custom SSL certificate from the given zone. +// +// API reference: https://api.cloudflare.com/#custom-ssl-for-a-zone-delete-an-ssl-certificate +func (api *API) DeleteSSL(ctx context.Context, zoneID, certificateID string) error { + uri := fmt.Sprintf("/zones/%s/custom_certificates/%s", zoneID, certificateID) + if _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil); err != nil { + return err + } + return nil +} + +// SSLValidationRecord displays Domain Control Validation tokens. 
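
// Illustrative usage sketch (not from the vendored source): changing the
// serving order of custom certificates with ReprioritizeSSL above. The zone
// and certificate IDs are placeholders; the exact ordering semantics of the
// priority field are defined by the API, so the value here is only an example.

package example

import (
	"context"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// PromoteCert assigns priority 1 to the chosen certificate on the zone.
func PromoteCert(api *cloudflare.API, zoneID, certID string) error {
	_, err := api.ReprioritizeSSL(context.Background(), zoneID, []cloudflare.ZoneCustomSSLPriority{
		{ID: certID, Priority: 1},
	})
	return err
}
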
+type SSLValidationRecord struct { + CnameTarget string `json:"cname_target,omitempty"` + CnameName string `json:"cname,omitempty"` + + TxtName string `json:"txt_name,omitempty"` + TxtValue string `json:"txt_value,omitempty"` + + HTTPUrl string `json:"http_url,omitempty"` + HTTPBody string `json:"http_body,omitempty"` + + Emails []string `json:"emails,omitempty"` +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/stream.go b/vendor/github.com/cloudflare/cloudflare-go/stream.go new file mode 100644 index 0000000..3880649 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/stream.go @@ -0,0 +1,574 @@ +package cloudflare + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/goccy/go-json" +) + +var ( + // ErrMissingUploadURL is for when a URL is required but missing. + ErrMissingUploadURL = errors.New("required url missing") + // ErrMissingMaxDuration is for when MaxDuration is required but missing. + ErrMissingMaxDuration = errors.New("required max duration missing") + // ErrMissingVideoID is for when VideoID is required but missing. + ErrMissingVideoID = errors.New("required video id missing") + // ErrMissingFilePath is for when FilePath is required but missing. + ErrMissingFilePath = errors.New("required file path missing") + // ErrMissingTusResumable is for when TusResumable is required but missing. + ErrMissingTusResumable = errors.New("required tus resumable missing") + // ErrInvalidTusResumable is for when TusResumable is invalid. + ErrInvalidTusResumable = errors.New("invalid tus resumable") + // ErrMarshallingTUSMetadata is for when TUS metadata cannot be marshalled. + ErrMarshallingTUSMetadata = errors.New("error marshalling TUS metadata") + // ErrMissingUploadLength is for when UploadLength is required but missing. + ErrMissingUploadLength = errors.New("required upload length missing") + // ErrInvalidStatusCode is for when the status code is invalid. + ErrInvalidStatusCode = errors.New("invalid status code") +) + +type TusProtocolVersion string + +const ( + TusProtocolVersion1_0_0 TusProtocolVersion = "1.0.0" +) + +// StreamVideo represents a stream video. 
+type StreamVideo struct { + AllowedOrigins []string `json:"allowedOrigins,omitempty"` + Created *time.Time `json:"created,omitempty"` + Duration float64 `json:"duration,omitempty"` + Input StreamVideoInput `json:"input,omitempty"` + MaxDurationSeconds int `json:"maxDurationSeconds,omitempty"` + Meta map[string]interface{} `json:"meta,omitempty"` + Modified *time.Time `json:"modified,omitempty"` + UploadExpiry *time.Time `json:"uploadExpiry,omitempty"` + Playback StreamVideoPlayback `json:"playback,omitempty"` + Preview string `json:"preview,omitempty"` + ReadyToStream bool `json:"readyToStream,omitempty"` + RequireSignedURLs bool `json:"requireSignedURLs,omitempty"` + Size int `json:"size,omitempty"` + Status StreamVideoStatus `json:"status,omitempty"` + Thumbnail string `json:"thumbnail,omitempty"` + ThumbnailTimestampPct float64 `json:"thumbnailTimestampPct,omitempty"` + UID string `json:"uid,omitempty"` + Creator string `json:"creator,omitempty"` + LiveInput string `json:"liveInput,omitempty"` + Uploaded *time.Time `json:"uploaded,omitempty"` + ScheduledDeletion *time.Time `json:"scheduledDeletion,omitempty"` + Watermark StreamVideoWatermark `json:"watermark,omitempty"` + NFT StreamVideoNFTParameters `json:"nft,omitempty"` +} + +// StreamVideoInput represents the video input values of a stream video. +type StreamVideoInput struct { + Height int `json:"height,omitempty"` + Width int `json:"width,omitempty"` +} + +// StreamVideoPlayback represents the playback URLs for a video. +type StreamVideoPlayback struct { + HLS string `json:"hls,omitempty"` + Dash string `json:"dash,omitempty"` +} + +// StreamVideoStatus represents the status of a stream video. +type StreamVideoStatus struct { + State string `json:"state,omitempty"` + PctComplete string `json:"pctComplete,omitempty"` + ErrorReasonCode string `json:"errorReasonCode,omitempty"` + ErrorReasonText string `json:"errorReasonText,omitempty"` +} + +// StreamVideoWatermark represents a watermark for a stream video. +type StreamVideoWatermark struct { + UID string `json:"uid,omitempty"` + Size int `json:"size,omitempty"` + Height int `json:"height,omitempty"` + Width int `json:"width,omitempty"` + Created *time.Time `json:"created,omitempty"` + DownloadedFrom string `json:"downloadedFrom,omitempty"` + Name string `json:"name,omitempty"` + Opacity float64 `json:"opacity,omitempty"` + Padding float64 `json:"padding,omitempty"` + Scale float64 `json:"scale,omitempty"` + Position string `json:"position,omitempty"` +} + +// StreamVideoNFTParameters represents a NFT for a stream video. +type StreamVideoNFTParameters struct { + AccountID string + VideoID string + Contract string `json:"contract,omitempty"` + Token int `json:"token,omitempty"` +} + +// StreamUploadFromURLParameters are the parameters used when uploading a video from URL. +type StreamUploadFromURLParameters struct { + AccountID string + VideoID string + URL string `json:"url"` + Creator string `json:"creator,omitempty"` + ThumbnailTimestampPct float64 `json:"thumbnailTimestampPct,omitempty"` + AllowedOrigins []string `json:"allowedOrigins,omitempty"` + RequireSignedURLs bool `json:"requireSignedURLs,omitempty"` + Watermark UploadVideoURLWatermark `json:"watermark,omitempty"` + Meta map[string]interface{} `json:"meta,omitempty"` + ScheduledDeletion *time.Time `json:"scheduledDeletion,omitempty"` +} + +// StreamCreateVideoParameters are parameters used when creating a video. 
+type StreamCreateVideoParameters struct { + AccountID string + MaxDurationSeconds int `json:"maxDurationSeconds,omitempty"` + Expiry *time.Time `json:"expiry,omitempty"` + Creator string `json:"creator,omitempty"` + ThumbnailTimestampPct float64 `json:"thumbnailTimestampPct,omitempty"` + AllowedOrigins []string `json:"allowedOrigins,omitempty"` + RequireSignedURLs bool `json:"requireSignedURLs,omitempty"` + Watermark UploadVideoURLWatermark `json:"watermark,omitempty"` + Meta map[string]interface{} `json:"meta,omitempty"` + ScheduledDeletion *time.Time `json:"scheduledDeletion,omitempty"` +} + +// UploadVideoURLWatermark represents UID of an existing watermark. +type UploadVideoURLWatermark struct { + UID string `json:"uid,omitempty"` +} + +// StreamVideoCreate represents parameters returned after creating a video. +type StreamVideoCreate struct { + UploadURL string `json:"uploadURL,omitempty"` + UID string `json:"uid,omitempty"` + Watermark StreamVideoWatermark `json:"watermark,omitempty"` + ScheduledDeletion *time.Time `json:"scheduledDeletion,omitempty"` +} + +// StreamParameters are the basic parameters needed. +type StreamParameters struct { + AccountID string + VideoID string +} + +// StreamUploadFileParameters are parameters needed for file upload of a video. +type StreamUploadFileParameters struct { + AccountID string + VideoID string + FilePath string + ScheduledDeletion *time.Time +} + +// StreamListParameters represents parameters used when listing stream videos. +type StreamListParameters struct { + AccountID string + VideoID string + After *time.Time `url:"after,omitempty"` + Before *time.Time `url:"before,omitempty"` + Creator string `url:"creator,omitempty"` + IncludeCounts bool `url:"include_counts,omitempty"` + Search string `url:"search,omitempty"` + Limit int `url:"limit,omitempty"` + Asc bool `url:"asc,omitempty"` + Status string `url:"status,omitempty"` +} + +// StreamSignedURLParameters represent parameters used when creating a signed URL. 
+type StreamSignedURLParameters struct { + AccountID string + VideoID string + ID string `json:"id,omitempty"` + PEM string `json:"pem,omitempty"` + EXP int `json:"exp,omitempty"` + NBF int `json:"nbf,omitempty"` + Downloadable bool `json:"downloadable,omitempty"` + AccessRules []StreamAccessRule `json:"accessRules,omitempty"` +} + +type StreamInitiateTUSUploadParameters struct { + DirectUserUpload bool `url:"direct_user,omitempty"` + TusResumable TusProtocolVersion `url:"-"` + UploadLength int64 `url:"-"` + UploadCreator string `url:"-"` + Metadata TUSUploadMetadata `url:"-"` +} + +type StreamInitiateTUSUploadResponse struct { + ResponseHeaders http.Header +} + +type TUSUploadMetadata struct { + Name string `json:"name,omitempty"` + MaxDurationSeconds int `json:"maxDurationSeconds,omitempty"` + RequireSignedURLs bool `json:"requiresignedurls,omitempty"` + AllowedOrigins string `json:"allowedorigins,omitempty"` + ThumbnailTimestampPct float64 `json:"thumbnailtimestamppct,omitempty"` + ScheduledDeletion *time.Time `json:"scheduledDeletion,omitempty"` + Expiry *time.Time `json:"expiry,omitempty"` + Watermark string `json:"watermark,omitempty"` +} + +func (t TUSUploadMetadata) ToTUSCsv() (string, error) { + var metadataValues []string + if t.Name != "" { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "name", base64.StdEncoding.EncodeToString([]byte(t.Name)))) + } + if t.MaxDurationSeconds != 0 { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "maxDurationSeconds", base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(t.MaxDurationSeconds))))) + } + if t.RequireSignedURLs { + metadataValues = append(metadataValues, "requiresignedurls") + } + if t.AllowedOrigins != "" { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "allowedorigins", base64.StdEncoding.EncodeToString([]byte(t.AllowedOrigins)))) + } + if t.ThumbnailTimestampPct != 0 { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "thumbnailtimestamppct", base64.StdEncoding.EncodeToString([]byte(strconv.FormatFloat(t.ThumbnailTimestampPct, 'f', -1, 64))))) + } + if t.ScheduledDeletion != nil { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "scheduledDeletion", base64.StdEncoding.EncodeToString([]byte(t.ScheduledDeletion.Format(time.RFC3339))))) + } + if t.Expiry != nil { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "expiry", base64.StdEncoding.EncodeToString([]byte(t.Expiry.Format(time.RFC3339))))) + } + if t.Watermark != "" { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "watermark", base64.StdEncoding.EncodeToString([]byte(t.Watermark)))) + } + + if len(metadataValues) > 0 { + return strings.Join(metadataValues, ","), nil + } + + return "", nil +} + +// StreamVideoResponse represents an API response of a stream video. +type StreamVideoResponse struct { + Response + Result StreamVideo `json:"result,omitempty"` +} + +// StreamVideoCreateResponse represents an API response of creating a stream video. +type StreamVideoCreateResponse struct { + Response + Result StreamVideoCreate `json:"result,omitempty"` +} + +// StreamListResponse represents the API response from a StreamListRequest. +type StreamListResponse struct { + Response + Result []StreamVideo `json:"result,omitempty"` + Total string `json:"total,omitempty"` + Range string `json:"range,omitempty"` +} + +// StreamSignedURLResponse represents an API response for a signed URL. 
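
// Illustrative usage sketch (not from the vendored source): ToTUSCsv above
// renders the metadata as comma-separated "key base64(value)" pairs, the
// format expected in a TUS Upload-Metadata header. All field values below are
// placeholders.

package example

import (
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// ShowTUSMetadata prints the encoded Upload-Metadata header value, e.g.
// "name ZGVtby5tcDQ=,maxDurationSeconds MzAw,requiresignedurls".
func ShowTUSMetadata() error {
	md := cloudflare.TUSUploadMetadata{
		Name:               "demo.mp4",
		MaxDurationSeconds: 300,
		RequireSignedURLs:  true, // boolean flags are emitted as a bare key with no value
	}
	header, err := md.ToTUSCsv()
	if err != nil {
		return err
	}
	fmt.Println(header)
	return nil
}
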
+type StreamSignedURLResponse struct { + Response + Result struct { + Token string `json:"token,omitempty"` + } +} + +// StreamAccessRule represents the accessRules when creating a signed URL. +type StreamAccessRule struct { + Type string `json:"type"` + Country []string `json:"country,omitempty"` + Action string `json:"action"` + IP []string `json:"ip,omitempty"` +} + +// StreamUploadFromURL send a video URL to it will be downloaded and made available on Stream. +// +// API Reference: https://api.cloudflare.com/#stream-videos-upload-a-video-from-a-url +func (api *API) StreamUploadFromURL(ctx context.Context, params StreamUploadFromURLParameters) (StreamVideo, error) { + if params.AccountID == "" { + return StreamVideo{}, ErrMissingAccountID + } + + if params.URL == "" { + return StreamVideo{}, ErrMissingUploadURL + } + + uri := fmt.Sprintf("/accounts/%s/stream/copy", params.AccountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return StreamVideo{}, err + } + + var streamVideoResponse StreamVideoResponse + if err := json.Unmarshal(res, &streamVideoResponse); err != nil { + return StreamVideo{}, err + } + return streamVideoResponse.Result, nil +} + +// StreamUploadVideoFile uploads a video from a path to the file. +// +// API Reference: https://api.cloudflare.com/#stream-videos-upload-a-video-using-a-single-http-request +func (api *API) StreamUploadVideoFile(ctx context.Context, params StreamUploadFileParameters) (StreamVideo, error) { + if params.AccountID == "" { + return StreamVideo{}, ErrMissingAccountID + } + + if params.FilePath == "" { + return StreamVideo{}, ErrMissingFilePath + } + + uri := fmt.Sprintf("/accounts/%s/stream", params.AccountID) + + // Create new multipart writer + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + formFile, err := writer.CreateFormFile("file", params.FilePath) + if err != nil { + return StreamVideo{}, err + } + file, err := os.Open(params.FilePath) + if err != nil { + return StreamVideo{}, err + } + if _, err := io.Copy(formFile, file); err != nil { + return StreamVideo{}, err + } + if err := writer.Close(); err != nil { + return StreamVideo{}, err + } + + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, body, http.Header{ + "Accept": []string{"application/json"}, + "Content-Type": []string{writer.FormDataContentType()}, + }) + if err != nil { + return StreamVideo{}, err + } + + var streamVideoResponse StreamVideoResponse + if err := json.Unmarshal(res, &streamVideoResponse); err != nil { + return StreamVideo{}, err + } + return streamVideoResponse.Result, nil +} + +// StreamCreateVideoDirectURL creates a video and returns an authenticated URL. 
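
// Illustrative usage sketch (not from the vendored source): pulling a video
// into Stream from a remote URL with StreamUploadFromURL above. The account ID
// and source URL are placeholders; assumes an already-configured
// *cloudflare.API client.

package example

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// CopyVideoFromURL queues a copy of the remote video and prints its UID.
func CopyVideoFromURL(api *cloudflare.API, accountID, sourceURL string) error {
	video, err := api.StreamUploadFromURL(context.Background(), cloudflare.StreamUploadFromURLParameters{
		AccountID: accountID,
		URL:       sourceURL,
		Meta:      map[string]interface{}{"name": "copied from URL"},
	})
	if err != nil {
		return err
	}
	fmt.Printf("queued video %s (ready: %t)\n", video.UID, video.ReadyToStream)
	return nil
}
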
+// +// API Reference: https://api.cloudflare.com/#stream-videos-create-a-video-and-get-authenticated-direct-upload-url +func (api *API) StreamCreateVideoDirectURL(ctx context.Context, params StreamCreateVideoParameters) (StreamVideoCreate, error) { + if params.AccountID == "" { + return StreamVideoCreate{}, ErrMissingAccountID + } + + if params.MaxDurationSeconds == 0 { + return StreamVideoCreate{}, ErrMissingMaxDuration + } + + uri := fmt.Sprintf("/accounts/%s/stream/direct_upload", params.AccountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return StreamVideoCreate{}, err + } + + var streamVideoCreateResponse StreamVideoCreateResponse + if err := json.Unmarshal(res, &streamVideoCreateResponse); err != nil { + return StreamVideoCreate{}, err + } + return streamVideoCreateResponse.Result, nil +} + +// StreamListVideos list videos currently in stream. +// +// API reference: https://api.cloudflare.com/#stream-videos-list-videos +func (api *API) StreamListVideos(ctx context.Context, params StreamListParameters) ([]StreamVideo, error) { + if params.AccountID == "" { + return []StreamVideo{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/stream", params.AccountID), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []StreamVideo{}, err + } + + var streamListResponse StreamListResponse + if err := json.Unmarshal(res, &streamListResponse); err != nil { + return []StreamVideo{}, err + } + return streamListResponse.Result, nil +} + +// StreamInitiateTUSVideoUpload generates a direct upload TUS url for a video. +// +// API Reference: https://developers.cloudflare.com/api/operations/stream-videos-initiate-video-uploads-using-tus +func (api *API) StreamInitiateTUSVideoUpload(ctx context.Context, rc *ResourceContainer, params StreamInitiateTUSUploadParameters) (StreamInitiateTUSUploadResponse, error) { + if rc.Level != AccountRouteLevel { + return StreamInitiateTUSUploadResponse{}, ErrRequiredAccountLevelResourceContainer + } + + headers := http.Header{} + if params.TusResumable == "" { + return StreamInitiateTUSUploadResponse{}, ErrMissingTusResumable + } else if params.TusResumable != TusProtocolVersion1_0_0 { + return StreamInitiateTUSUploadResponse{}, ErrInvalidTusResumable + } else { + headers.Set("Tus-Resumable", string(params.TusResumable)) + } + + if params.UploadLength == 0 { + return StreamInitiateTUSUploadResponse{}, ErrMissingUploadLength + } else { + headers.Set("Upload-Length", strconv.FormatInt(params.UploadLength, 10)) + } + + if params.UploadCreator != "" { + headers.Set("Upload-Creator", params.UploadCreator) + } + + metadataTusCsv, err := params.Metadata.ToTUSCsv() + if err != nil { + return StreamInitiateTUSUploadResponse{}, ErrMarshallingTUSMetadata + } + if metadataTusCsv != "" { + headers.Set("Upload-Metadata", metadataTusCsv) + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/stream", rc.Identifier), params) + res, err := api.makeRequestWithAuthTypeAndHeadersComplete(ctx, http.MethodPost, uri, nil, api.authType, headers) + if err != nil { + return StreamInitiateTUSUploadResponse{}, err + } + + if res.StatusCode != http.StatusCreated { + return StreamInitiateTUSUploadResponse{}, ErrInvalidStatusCode + } + + return StreamInitiateTUSUploadResponse{ResponseHeaders: res.Headers}, nil +} + +// StreamGetVideo gets the details for a specific video. 
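
// Illustrative usage sketch (not from the vendored source): initiating a TUS
// upload for a 50 MB video with StreamInitiateTUSVideoUpload above. Assumes
// the package's AccountIdentifier resource-container helper and a placeholder
// account ID.

package example

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// StartTUSUpload creates the upload and prints the URL the client should
// resume against; per the TUS protocol this is returned in the Location header.
func StartTUSUpload(api *cloudflare.API, accountID string) error {
	resp, err := api.StreamInitiateTUSVideoUpload(
		context.Background(),
		cloudflare.AccountIdentifier(accountID),
		cloudflare.StreamInitiateTUSUploadParameters{
			TusResumable: cloudflare.TusProtocolVersion1_0_0,
			UploadLength: 50 * 1024 * 1024,
			Metadata:     cloudflare.TUSUploadMetadata{Name: "tus-upload.mp4"},
		},
	)
	if err != nil {
		return err
	}
	fmt.Println("upload to:", resp.ResponseHeaders.Get("Location"))
	return nil
}
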
+// +// API Reference: https://api.cloudflare.com/#stream-videos-video-details +func (api *API) StreamGetVideo(ctx context.Context, options StreamParameters) (StreamVideo, error) { + if options.AccountID == "" { + return StreamVideo{}, ErrMissingAccountID + } + + if options.VideoID == "" { + return StreamVideo{}, ErrMissingVideoID + } + + uri := fmt.Sprintf("/accounts/%s/stream/%s", options.AccountID, options.VideoID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return StreamVideo{}, err + } + var streamVideoResponse StreamVideoResponse + if err := json.Unmarshal(res, &streamVideoResponse); err != nil { + return StreamVideo{}, err + } + return streamVideoResponse.Result, nil +} + +// StreamEmbedHTML gets an HTML fragment to embed on a web page. +// +// API Reference: https://api.cloudflare.com/#stream-videos-embed-code-html +func (api *API) StreamEmbedHTML(ctx context.Context, options StreamParameters) (string, error) { + if options.AccountID == "" { + return "", ErrMissingAccountID + } + + if options.VideoID == "" { + return "", ErrMissingVideoID + } + + uri := fmt.Sprintf("/accounts/%s/stream/%s/embed", options.AccountID, options.VideoID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + if err != nil { + return "", err + } + return string(res), nil +} + +// StreamDeleteVideo deletes a video. +// +// API Reference: https://api.cloudflare.com/#stream-videos-delete-video +func (api *API) StreamDeleteVideo(ctx context.Context, options StreamParameters) error { + if options.AccountID == "" { + return ErrMissingAccountID + } + + if options.VideoID == "" { + return ErrMissingVideoID + } + + uri := fmt.Sprintf("/accounts/%s/stream/%s", options.AccountID, options.VideoID) + if _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil); err != nil { + return err + } + return nil +} + +// StreamAssociateNFT associates a video to a token and contract address. +// +// API Reference: https://api.cloudflare.com/#stream-videos-associate-video-to-an-nft +func (api *API) StreamAssociateNFT(ctx context.Context, options StreamVideoNFTParameters) (StreamVideo, error) { + if options.AccountID == "" { + return StreamVideo{}, ErrMissingAccountID + } + + if options.VideoID == "" { + return StreamVideo{}, ErrMissingVideoID + } + + uri := fmt.Sprintf("/accounts/%s/stream/%s", options.AccountID, options.VideoID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, options) + if err != nil { + return StreamVideo{}, err + } + var streamVideoResponse StreamVideoResponse + if err := json.Unmarshal(res, &streamVideoResponse); err != nil { + return StreamVideo{}, err + } + return streamVideoResponse.Result, nil +} + +// StreamCreateSignedURL creates a signed URL token for a video. 
+// +// API Reference: https://api.cloudflare.com/#stream-videos-associate-video-to-an-nft +func (api *API) StreamCreateSignedURL(ctx context.Context, params StreamSignedURLParameters) (string, error) { + if params.AccountID == "" { + return "", ErrMissingAccountID + } + if params.VideoID == "" { + return "", ErrMissingVideoID + } + + uri := fmt.Sprintf("/accounts/%s/stream/%s/token", params.AccountID, params.VideoID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + + if err != nil { + return "", err + } + var streamSignedResponse StreamSignedURLResponse + if err := json.Unmarshal(res, &streamSignedResponse); err != nil { + return "", err + } + return streamSignedResponse.Result.Token, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go b/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go new file mode 100644 index 0000000..91e1e6a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go @@ -0,0 +1,344 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type TeamsAccount struct { + GatewayTag string `json:"gateway_tag"` // Internal teams ID + ProviderName string `json:"provider_name"` // Auth provider + ID string `json:"id"` // cloudflare account ID +} + +// TeamsAccountResponse is the API response, containing information on teams +// account. +type TeamsAccountResponse struct { + Response + Result TeamsAccount `json:"result"` +} + +// TeamsConfigResponse is the API response, containing information on teams +// account config. +type TeamsConfigResponse struct { + Response + Result TeamsConfiguration `json:"result"` +} + +// TeamsConfiguration data model. +type TeamsConfiguration struct { + Settings TeamsAccountSettings `json:"settings"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +type TeamsAccountSettings struct { + Antivirus *TeamsAntivirus `json:"antivirus,omitempty"` + TLSDecrypt *TeamsTLSDecrypt `json:"tls_decrypt,omitempty"` + ActivityLog *TeamsActivityLog `json:"activity_log,omitempty"` + BlockPage *TeamsBlockPage `json:"block_page,omitempty"` + BrowserIsolation *BrowserIsolation `json:"browser_isolation,omitempty"` + FIPS *TeamsFIPS `json:"fips,omitempty"` + ProtocolDetection *TeamsProtocolDetection `json:"protocol_detection,omitempty"` + BodyScanning *TeamsBodyScanning `json:"body_scanning,omitempty"` + ExtendedEmailMatching *TeamsExtendedEmailMatching `json:"extended_email_matching,omitempty"` + CustomCertificate *TeamsCustomCertificate `json:"custom_certificate,omitempty"` + Certificate *TeamsCertificateSetting `json:"certificate,omitempty"` +} + +type BrowserIsolation struct { + UrlBrowserIsolationEnabled *bool `json:"url_browser_isolation_enabled,omitempty"` + NonIdentityEnabled *bool `json:"non_identity_enabled,omitempty"` +} + +type TeamsAntivirus struct { + EnabledDownloadPhase bool `json:"enabled_download_phase"` + EnabledUploadPhase bool `json:"enabled_upload_phase"` + FailClosed bool `json:"fail_closed"` + NotificationSettings *TeamsNotificationSettings `json:"notification_settings"` +} + +type TeamsFIPS struct { + TLS bool `json:"tls"` +} + +type TeamsTLSDecrypt struct { + Enabled bool `json:"enabled"` +} + +type TeamsProtocolDetection struct { + Enabled bool `json:"enabled"` +} + +type TeamsActivityLog struct { + Enabled bool `json:"enabled"` +} + +type TeamsBlockPage struct { + Enabled *bool `json:"enabled,omitempty"` + FooterText string 
`json:"footer_text,omitempty"` + HeaderText string `json:"header_text,omitempty"` + LogoPath string `json:"logo_path,omitempty"` + BackgroundColor string `json:"background_color,omitempty"` + Name string `json:"name,omitempty"` + MailtoAddress string `json:"mailto_address,omitempty"` + MailtoSubject string `json:"mailto_subject,omitempty"` + SuppressFooter *bool `json:"suppress_footer,omitempty"` +} + +type TeamsInspectionMode = string + +const ( + TeamsShallowInspectionMode TeamsInspectionMode = "shallow" + TeamsDeepInspectionMode TeamsInspectionMode = "deep" +) + +type TeamsBodyScanning struct { + InspectionMode TeamsInspectionMode `json:"inspection_mode,omitempty"` +} + +type TeamsExtendedEmailMatching struct { + Enabled *bool `json:"enabled,omitempty"` +} + +type TeamsCustomCertificate struct { + Enabled *bool `json:"enabled,omitempty"` + ID string `json:"id,omitempty"` + BindingStatus string `json:"binding_status,omitempty"` + QsPackId string `json:"qs_pack_id,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type TeamsCertificateSetting struct { + ID string `json:"id"` +} + +type TeamsRuleType = string + +const ( + TeamsHttpRuleType TeamsRuleType = "http" + TeamsDnsRuleType TeamsRuleType = "dns" + TeamsL4RuleType TeamsRuleType = "l4" +) + +type TeamsAccountLoggingConfiguration struct { + LogAll bool `json:"log_all"` + LogBlocks bool `json:"log_blocks"` +} + +type TeamsLoggingSettings struct { + LoggingSettingsByRuleType map[TeamsRuleType]TeamsAccountLoggingConfiguration `json:"settings_by_rule_type"` + RedactPii bool `json:"redact_pii,omitempty"` +} + +type TeamsDeviceSettings struct { + GatewayProxyEnabled bool `json:"gateway_proxy_enabled"` + GatewayProxyUDPEnabled bool `json:"gateway_udp_proxy_enabled"` + RootCertificateInstallationEnabled bool `json:"root_certificate_installation_enabled"` + UseZTVirtualIP *bool `json:"use_zt_virtual_ip"` + DisableForTime int32 `json:"disable_for_time"` +} + +type TeamsDeviceSettingsResponse struct { + Response + Result TeamsDeviceSettings `json:"result"` +} + +type TeamsLoggingSettingsResponse struct { + Response + Result TeamsLoggingSettings `json:"result"` +} + +type TeamsConnectivitySettings struct { + ICMPProxyEnabled *bool `json:"icmp_proxy_enabled"` + OfframpWARPEnabled *bool `json:"offramp_warp_enabled"` +} + +type TeamsAccountConnectivitySettingsResponse struct { + Response + Result TeamsConnectivitySettings `json:"result"` +} + +// TeamsAccount returns teams account information with internal and external ID. +// +// API reference: TBA. +func (api *API) TeamsAccount(ctx context.Context, accountID string) (TeamsAccount, error) { + uri := fmt.Sprintf("/accounts/%s/gateway", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsAccount{}, err + } + + var teamsAccountResponse TeamsAccountResponse + err = json.Unmarshal(res, &teamsAccountResponse) + if err != nil { + return TeamsAccount{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsAccountResponse.Result, nil +} + +// TeamsAccountConfiguration returns teams account configuration. +// +// API reference: TBA. 
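
// Illustrative usage sketch (not from the vendored source): looking up the
// Zero Trust gateway account metadata with TeamsAccount above. The account ID
// is a placeholder; assumes an already-configured *cloudflare.API client.

package example

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// ShowGatewayAccount prints the internal gateway tag and auth provider name.
func ShowGatewayAccount(api *cloudflare.API, accountID string) error {
	acct, err := api.TeamsAccount(context.Background(), accountID)
	if err != nil {
		return err
	}
	fmt.Printf("gateway tag %s, provider %s\n", acct.GatewayTag, acct.ProviderName)
	return nil
}
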
+func (api *API) TeamsAccountConfiguration(ctx context.Context, accountID string) (TeamsConfiguration, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/configuration", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsConfiguration{}, err + } + + var teamsConfigResponse TeamsConfigResponse + err = json.Unmarshal(res, &teamsConfigResponse) + if err != nil { + return TeamsConfiguration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsConfigResponse.Result, nil +} + +// TeamsAccountDeviceConfiguration returns teams account device configuration with udp status. +// +// API reference: TBA. +func (api *API) TeamsAccountDeviceConfiguration(ctx context.Context, accountID string) (TeamsDeviceSettings, error) { + uri := fmt.Sprintf("/accounts/%s/devices/settings", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsDeviceSettings{}, err + } + + var teamsDeviceResponse TeamsDeviceSettingsResponse + err = json.Unmarshal(res, &teamsDeviceResponse) + if err != nil { + return TeamsDeviceSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsDeviceResponse.Result, nil +} + +// TeamsAccountLoggingConfiguration returns teams account logging configuration. +// +// API reference: TBA. +func (api *API) TeamsAccountLoggingConfiguration(ctx context.Context, accountID string) (TeamsLoggingSettings, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/logging", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsLoggingSettings{}, err + } + + var teamsConfigResponse TeamsLoggingSettingsResponse + err = json.Unmarshal(res, &teamsConfigResponse) + if err != nil { + return TeamsLoggingSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsConfigResponse.Result, nil +} + +// TeamsAccountConnectivityConfiguration returns zero trust account connectivity settings. +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-accounts-get-connectivity-settings +func (api *API) TeamsAccountConnectivityConfiguration(ctx context.Context, accountID string) (TeamsConnectivitySettings, error) { + uri := fmt.Sprintf("/accounts/%s/zerotrust/connectivity_settings", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsConnectivitySettings{}, err + } + + var teamsConnectivityResponse TeamsAccountConnectivitySettingsResponse + err = json.Unmarshal(res, &teamsConnectivityResponse) + if err != nil { + return TeamsConnectivitySettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsConnectivityResponse.Result, nil +} + +// TeamsAccountUpdateConfiguration updates a teams account configuration. +// +// API reference: TBA. 
+func (api *API) TeamsAccountUpdateConfiguration(ctx context.Context, accountID string, config TeamsConfiguration) (TeamsConfiguration, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/configuration", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, config) + if err != nil { + return TeamsConfiguration{}, err + } + + var teamsConfigResponse TeamsConfigResponse + err = json.Unmarshal(res, &teamsConfigResponse) + if err != nil { + return TeamsConfiguration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsConfigResponse.Result, nil +} + +// TeamsAccountUpdateLoggingConfiguration updates the log settings and returns new teams account logging configuration. +// +// API reference: TBA. +func (api *API) TeamsAccountUpdateLoggingConfiguration(ctx context.Context, accountID string, config TeamsLoggingSettings) (TeamsLoggingSettings, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/logging", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, config) + if err != nil { + return TeamsLoggingSettings{}, err + } + + var teamsConfigResponse TeamsLoggingSettingsResponse + err = json.Unmarshal(res, &teamsConfigResponse) + if err != nil { + return TeamsLoggingSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsConfigResponse.Result, nil +} + +// TeamsAccountDeviceUpdateConfiguration updates teams account device configuration including udp filtering status. +// +// API reference: TBA. +func (api *API) TeamsAccountDeviceUpdateConfiguration(ctx context.Context, accountID string, settings TeamsDeviceSettings) (TeamsDeviceSettings, error) { + uri := fmt.Sprintf("/accounts/%s/devices/settings", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, settings) + if err != nil { + return TeamsDeviceSettings{}, err + } + + var teamsDeviceResponse TeamsDeviceSettingsResponse + err = json.Unmarshal(res, &teamsDeviceResponse) + if err != nil { + return TeamsDeviceSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsDeviceResponse.Result, nil +} + +// TeamsAccountConnectivityUpdateConfiguration updates zero trust account connectivity settings. +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-accounts-patch-connectivity-settings +func (api *API) TeamsAccountConnectivityUpdateConfiguration(ctx context.Context, accountID string, settings TeamsConnectivitySettings) (TeamsConnectivitySettings, error) { + uri := fmt.Sprintf("/accounts/%s/zerotrust/connectivity_settings", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, settings) + if err != nil { + return TeamsConnectivitySettings{}, err + } + + var teamsConnectivityResponse TeamsAccountConnectivitySettingsResponse + err = json.Unmarshal(res, &teamsConnectivityResponse) + if err != nil { + return TeamsConnectivitySettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsConnectivityResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_audit_ssh_settings.go b/vendor/github.com/cloudflare/cloudflare-go/teams_audit_ssh_settings.go new file mode 100644 index 0000000..a4ec1e3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_audit_ssh_settings.go @@ -0,0 +1,86 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// TeamsList represents a Teams List. 
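
// Illustrative usage sketch (not from the vendored source): a read-modify-write
// of the gateway configuration using TeamsAccountConfiguration and
// TeamsAccountUpdateConfiguration above, turning on TLS decryption. The
// account ID is a placeholder.

package example

import (
	"context"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// EnableTLSDecrypt fetches the current configuration, sets the TLS decrypt
// section, and writes the whole configuration back.
func EnableTLSDecrypt(api *cloudflare.API, accountID string) error {
	ctx := context.Background()

	cfg, err := api.TeamsAccountConfiguration(ctx, accountID)
	if err != nil {
		return err
	}

	// Settings sections are pointers, so untouched sections stay omitted.
	cfg.Settings.TLSDecrypt = &cloudflare.TeamsTLSDecrypt{Enabled: true}

	_, err = api.TeamsAccountUpdateConfiguration(ctx, accountID, cfg)
	return err
}
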
+type AuditSSHSettings struct { + PublicKey string `json:"public_key"` + SeedUUID string `json:"seed_id"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +type AuditSSHSettingsResponse struct { + Result AuditSSHSettings `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type GetAuditSSHSettingsParams struct{} + +type UpdateAuditSSHSettingsParams struct { + PublicKey string `json:"public_key"` +} + +// GetAuditSSHSettings returns the accounts zt audit ssh settings. +// +// API reference: https://api.cloudflare.com/#zero-trust-get-audit-ssh-settings +func (api *API) GetAuditSSHSettings(ctx context.Context, rc *ResourceContainer, params GetAuditSSHSettingsParams) (AuditSSHSettings, ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return AuditSSHSettings{}, ResultInfo{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/%s/%s/gateway/audit_ssh_settings", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return AuditSSHSettings{}, ResultInfo{}, err + } + + var auditSSHSettingsResponse AuditSSHSettingsResponse + err = json.Unmarshal(res, &auditSSHSettingsResponse) + if err != nil { + return AuditSSHSettings{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return auditSSHSettingsResponse.Result, auditSSHSettingsResponse.ResultInfo, nil +} + +// UpdateAuditSSHSettings updates an existing zt audit ssh setting. +// +// API reference: https://api.cloudflare.com/#zero-trust-update-audit-ssh-settings +func (api *API) UpdateAuditSSHSettings(ctx context.Context, rc *ResourceContainer, params UpdateAuditSSHSettingsParams) (AuditSSHSettings, error) { + if rc.Level != AccountRouteLevel { + return AuditSSHSettings{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return AuditSSHSettings{}, ErrMissingAccountID + } + + uri := fmt.Sprintf( + "/%s/%s/gateway/audit_ssh_settings", + rc.Level, + rc.Identifier, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return AuditSSHSettings{}, err + } + + var auditSSHSettingsResponse AuditSSHSettingsResponse + err = json.Unmarshal(res, &auditSSHSettingsResponse) + if err != nil { + return AuditSSHSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return auditSSHSettingsResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_certificates.go b/vendor/github.com/cloudflare/cloudflare-go/teams_certificates.go new file mode 100644 index 0000000..00e6f10 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_certificates.go @@ -0,0 +1,158 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type TeamsCertificate struct { + InUse *bool `json:"in_use"` + ID string `json:"id"` + BindingStatus string `json:"binding_status"` + QsPackId string `json:"qs_pack_id"` + Type string `json:"type"` + UpdatedAt *time.Time `json:"updated_at"` + UploadedOn *time.Time `json:"uploaded_on"` + CreatedAt *time.Time `json:"created_at"` + ExpiresOn *time.Time `json:"expires_on"` +} + +type TeamsCertificateCreateRequest struct { + ValidityPeriodDays int `json:"validity_period_days,omitempty"` +} + +const DEFAULT_VALIDITY_PERIOD_DAYS = 1826 + +// TeamsCertificateResponse is the API response, containing a single certificate. 
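
// Illustrative usage sketch (not from the vendored source): rotating the audit
// SSH public key with UpdateAuditSSHSettings above. Assumes the package's
// AccountIdentifier helper and placeholder account ID and key values.

package example

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

// RotateAuditSSHKey replaces the audit SSH public key and prints the seed ID
// returned by the API.
func RotateAuditSSHKey(api *cloudflare.API, accountID, publicKey string) error {
	settings, err := api.UpdateAuditSSHSettings(
		context.Background(),
		cloudflare.AccountIdentifier(accountID),
		cloudflare.UpdateAuditSSHSettingsParams{PublicKey: publicKey},
	)
	if err != nil {
		return err
	}
	fmt.Println("seed:", settings.SeedUUID)
	return nil
}
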
+type TeamsCertificateResponse struct { + Response + Result TeamsCertificate `json:"result"` +} + +// TeamsCertificatesResponse is the API response, containing an array of certificates. +type TeamsCertificatesResponse struct { + Response + Result []TeamsCertificate `json:"result"` +} + +// TeamsCertificates returns all certificates in an account +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-certificates-list-zero-trust-certificates +func (api *API) TeamsCertificates(ctx context.Context, accountID string) ([]TeamsCertificate, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/certificates", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsCertificate{}, err + } + + var teamsCertificatesResponse TeamsCertificatesResponse + err = json.Unmarshal(res, &teamsCertificatesResponse) + if err != nil { + return []TeamsCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsCertificatesResponse.Result, nil +} + +// TeamsCertificate returns teams account certificate. +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-certificates-zero-trust-certificate-details +func (api *API) TeamsCertificate(ctx context.Context, accountID string, certificateId string) (TeamsCertificate, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/certificates/%s", accountID, certificateId) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsCertificate{}, err + } + + var teamsCertificateResponse TeamsCertificateResponse + err = json.Unmarshal(res, &teamsCertificateResponse) + if err != nil { + return TeamsCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsCertificateResponse.Result, nil +} + +// TeamsGenerateCertificate generates a new gateway managed certificate +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-certificates-create-zero-trust-certificate +func (api *API) TeamsGenerateCertificate(ctx context.Context, accountID string, certificateRequest TeamsCertificateCreateRequest) (TeamsCertificate, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/certificates", accountID) + + if certificateRequest.ValidityPeriodDays == 0 { + certificateRequest.ValidityPeriodDays = DEFAULT_VALIDITY_PERIOD_DAYS + } + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, certificateRequest) + if err != nil { + return TeamsCertificate{}, err + } + + var teamsCertResponse TeamsCertificateResponse + err = json.Unmarshal(res, &teamsCertResponse) + if err != nil { + return TeamsCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsCertResponse.Result, nil +} + +// TeamsActivateCertificate activates a certificate +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-certificates-activate-zero-trust-certificate +func (api *API) TeamsActivateCertificate(ctx context.Context, accountID string, certificateId string) (TeamsCertificate, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/certificates/%s/activate", accountID, certificateId) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return TeamsCertificate{}, err + } + + var teamsCertResponse TeamsCertificateResponse + err = json.Unmarshal(res, &teamsCertResponse) + if err != nil { + return TeamsCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsCertResponse.Result, nil +} + +// 
TeamsDectivateCertificate deactivates a certificate +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-certificates-deactivate-zero-trust-certificate +func (api *API) TeamsDeactivateCertificate(ctx context.Context, accountID string, certificateId string) (TeamsCertificate, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/certificates/%s/deactivate", accountID, certificateId) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return TeamsCertificate{}, err + } + + var teamsCertResponse TeamsCertificateResponse + err = json.Unmarshal(res, &teamsCertResponse) + if err != nil { + return TeamsCertificate{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsCertResponse.Result, nil +} + +// TeamsDeleteCertificate deletes a certificate. +// +// API reference: https://developers.cloudflare.com/api/operations/zero-trust-certificates-delete-zero-trust-certificate +func (api *API) TeamsDeleteCertificate(ctx context.Context, accountID string, certificateId string) error { + uri := fmt.Sprintf("/accounts/%s/gateway/certificates/%s", accountID, certificateId) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_devices.go b/vendor/github.com/cloudflare/cloudflare-go/teams_devices.go new file mode 100644 index 0000000..96ec4d0 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_devices.go @@ -0,0 +1,107 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type TeamsDevicesList struct { + Response + Result []TeamsDeviceListItem `json:"result"` +} + +type TeamsDeviceDetail struct { + Response + Result TeamsDeviceListItem `json:"result"` +} + +type TeamsDeviceListItem struct { + User UserItem `json:"user,omitempty"` + ID string `json:"id,omitempty"` + Key string `json:"key,omitempty"` + DeviceType string `json:"device_type,omitempty"` + Name string `json:"name,omitempty"` + Model string `json:"model,omitempty"` + Manufacturer string `json:"manufacturer,omitempty"` + Deleted bool `json:"deleted,omitempty"` + Version string `json:"version,omitempty"` + SerialNumber string `json:"serial_number,omitempty"` + OSVersion string `json:"os_version,omitempty"` + OSDistroName string `json:"os_distro_name,omitempty"` + OsDistroRevision string `json:"os_distro_revision,omitempty"` + OSVersionExtra string `json:"os_version_extra,omitempty"` + MacAddress string `json:"mac_address,omitempty"` + IP string `json:"ip,omitempty"` + Created string `json:"created,omitempty"` + Updated string `json:"updated,omitempty"` + LastSeen string `json:"last_seen,omitempty"` + RevokedAt string `json:"revoked_at,omitempty"` +} + +type UserItem struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` +} + +// ListTeamsDevice returns all devices for a given account. 
+// +// API reference : https://api.cloudflare.com/#devices-list-devices +func (api *API) ListTeamsDevices(ctx context.Context, accountID string) ([]TeamsDeviceListItem, error) { + uri := fmt.Sprintf("/%s/%s/devices", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsDeviceListItem{}, err + } + + var response TeamsDevicesList + err = json.Unmarshal(res, &response) + if err != nil { + return []TeamsDeviceListItem{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// RevokeTeamsDevice revokes device with given identifiers. +// +// API reference : https://api.cloudflare.com/#devices-revoke-devices +func (api *API) RevokeTeamsDevices(ctx context.Context, accountID string, deviceIds []string) (Response, error) { + uri := fmt.Sprintf("/%s/%s/devices/revoke", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, deviceIds) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// GetTeamsDeviceDetails gets device details. +// +// API reference : https://api.cloudflare.com/#devices-device-details +func (api *API) GetTeamsDeviceDetails(ctx context.Context, accountID string, deviceID string) (TeamsDeviceListItem, error) { + uri := fmt.Sprintf("/%s/%s/devices/%s", AccountRouteRoot, accountID, deviceID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsDeviceListItem{}, err + } + + var response TeamsDeviceDetail + err = json.Unmarshal(res, &response) + if err != nil { + return TeamsDeviceListItem{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_list.go b/vendor/github.com/cloudflare/cloudflare-go/teams_list.go new file mode 100644 index 0000000..61f160c --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_list.go @@ -0,0 +1,331 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ErrMissingListID = errors.New("required missing list ID") + +// TeamsList represents a Teams List. +type TeamsList struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description,omitempty"` + Items []TeamsListItem `json:"items,omitempty"` + Count uint64 `json:"count,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// TeamsListItem represents a single list item. +type TeamsListItem struct { + Value string `json:"value"` + Description string `json:"description,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` +} + +// PatchTeamsList represents a patch request for appending/removing list items. +type PatchTeamsList struct { + ID string `json:"id"` + Append []TeamsListItem `json:"append"` + Remove []string `json:"remove"` +} + +// TeamsListListResponse represents the response from the list +// teams lists endpoint. +type TeamsListListResponse struct { + Result []TeamsList `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// TeamsListItemsListResponse represents the response from the list +// teams list items endpoint. 
+type TeamsListItemsListResponse struct { + Result []TeamsListItem `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// TeamsListDetailResponse is the API response, containing a single +// teams list. +type TeamsListDetailResponse struct { + Response + Result TeamsList `json:"result"` +} + +type ListTeamsListItemsParams struct { + ListID string `url:"-"` + + ResultInfo +} + +type ListTeamListsParams struct{} + +type CreateTeamsListParams struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description,omitempty"` + Items []TeamsListItem `json:"items,omitempty"` + Count uint64 `json:"count,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type UpdateTeamsListParams struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description,omitempty"` + Items []TeamsListItem `json:"items,omitempty"` + Count uint64 `json:"count,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type PatchTeamsListParams struct { + ID string `json:"id"` + Append []TeamsListItem `json:"append"` + Remove []string `json:"remove"` +} + +// ListTeamsLists returns all lists within an account. +// +// API reference: https://api.cloudflare.com/#teams-lists-list-teams-lists +func (api *API) ListTeamsLists(ctx context.Context, rc *ResourceContainer, params ListTeamListsParams) ([]TeamsList, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/gateway/lists", AccountRouteRoot, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsList{}, ResultInfo{}, err + } + + var teamsListListResponse TeamsListListResponse + err = json.Unmarshal(res, &teamsListListResponse) + if err != nil { + return []TeamsList{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsListListResponse.Result, teamsListListResponse.ResultInfo, nil +} + +// GetTeamsList returns a single list based on the list ID. +// +// API reference: https://api.cloudflare.com/#teams-lists-teams-list-details +func (api *API) GetTeamsList(ctx context.Context, rc *ResourceContainer, listID string) (TeamsList, error) { + uri := fmt.Sprintf( + "/%s/%s/gateway/lists/%s", + rc.Level, + rc.Identifier, + listID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsList{}, err + } + + var teamsListDetailResponse TeamsListDetailResponse + err = json.Unmarshal(res, &teamsListDetailResponse) + if err != nil { + return TeamsList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsListDetailResponse.Result, nil +} + +// ListTeamsListItems returns all list items for a list. 
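+//
+// Illustrative sketch, not upstream documentation: "api", "ctx", "accountID"
+// and "listID" are assumed, and AccountIdentifier (defined elsewhere in this
+// package) is assumed to build the account-level *ResourceContainer.
+//
+//	items, _, err := api.ListTeamsListItems(ctx, AccountIdentifier(accountID),
+//		ListTeamsListItemsParams{ListID: listID})
+//	if err != nil {
+//		return err
+//	}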
+// +// API reference: https://api.cloudflare.com/#teams-lists-teams-list-items +func (api *API) ListTeamsListItems(ctx context.Context, rc *ResourceContainer, params ListTeamsListItemsParams) ([]TeamsListItem, ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return []TeamsListItem{}, ResultInfo{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return []TeamsListItem{}, ResultInfo{}, ErrMissingAccountID + } + + if params.ListID == "" { + return []TeamsListItem{}, ResultInfo{}, ErrMissingListID + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 50 + } + + if params.Page < 1 { + params.Page = 1 + } + + var teamListItems []TeamsListItem + var lResponse TeamsListItemsListResponse + for { + lResponse = TeamsListItemsListResponse{} + uri := buildURI( + fmt.Sprintf("/%s/%s/gateway/lists/%s/items", rc.Level, rc.Identifier, params.ListID), + params, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsListItem{}, ResultInfo{}, err + } + + err = json.Unmarshal(res, &lResponse) + if err != nil { + return []TeamsListItem{}, ResultInfo{}, fmt.Errorf("failed to unmarshal teams list JSON data: %w", err) + } + + teamListItems = append(teamListItems, lResponse.Result...) + params.ResultInfo = lResponse.ResultInfo.Next() + + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return teamListItems, lResponse.ResultInfo, nil +} + +// CreateTeamsList creates a new teams list. +// +// API reference: https://api.cloudflare.com/#teams-lists-create-teams-list +func (api *API) CreateTeamsList(ctx context.Context, rc *ResourceContainer, params CreateTeamsListParams) (TeamsList, error) { + if rc.Level != AccountRouteLevel { + return TeamsList{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return TeamsList{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/%s/%s/gateway/lists", rc.Level, rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return TeamsList{}, err + } + + var teamsListDetailResponse TeamsListDetailResponse + err = json.Unmarshal(res, &teamsListDetailResponse) + if err != nil { + return TeamsList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsListDetailResponse.Result, nil +} + +// UpdateTeamsList updates an existing teams list. +// +// API reference: https://api.cloudflare.com/#teams-lists-update-teams-list +func (api *API) UpdateTeamsList(ctx context.Context, rc *ResourceContainer, params UpdateTeamsListParams) (TeamsList, error) { + if rc.Level != AccountRouteLevel { + return TeamsList{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return TeamsList{}, ErrMissingAccountID + } + + if params.ID == "" { + return TeamsList{}, fmt.Errorf("teams list ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/gateway/lists/%s", + rc.Level, + rc.Identifier, + params.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return TeamsList{}, err + } + + var teamsListDetailResponse TeamsListDetailResponse + err = json.Unmarshal(res, &teamsListDetailResponse) + if err != nil { + return TeamsList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsListDetailResponse.Result, nil +} + +// PatchTeamsList updates the items in an existing teams list. 
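+//
+// Illustrative sketch, not upstream documentation: appends one value to and
+// removes one value from an existing list ("api", "ctx", "accountID", "listID"
+// and the AccountIdentifier helper are assumed).
+//
+//	list, err := api.PatchTeamsList(ctx, AccountIdentifier(accountID), PatchTeamsListParams{
+//		ID:     listID,
+//		Append: []TeamsListItem{{Value: "example.com"}},
+//		Remove: []string{"old.example.com"},
+//	})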
+// +// API reference: https://api.cloudflare.com/#teams-lists-patch-teams-list +func (api *API) PatchTeamsList(ctx context.Context, rc *ResourceContainer, listPatch PatchTeamsListParams) (TeamsList, error) { + if rc.Level != AccountRouteLevel { + return TeamsList{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return TeamsList{}, ErrMissingAccountID + } + + if listPatch.ID == "" { + return TeamsList{}, fmt.Errorf("teams list ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/gateway/lists/%s", + AccountRouteRoot, + rc.Identifier, + listPatch.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, listPatch) + if err != nil { + return TeamsList{}, err + } + + var teamsListDetailResponse TeamsListDetailResponse + err = json.Unmarshal(res, &teamsListDetailResponse) + if err != nil { + return TeamsList{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsListDetailResponse.Result, nil +} + +// DeleteTeamsList deletes a teams list. +// +// API reference: https://api.cloudflare.com/#teams-lists-delete-teams-list +func (api *API) DeleteTeamsList(ctx context.Context, rc *ResourceContainer, teamsListID string) error { + if rc.Level != AccountRouteLevel { + return fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + if rc.Identifier == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf( + "/%s/%s/gateway/lists/%s", + AccountRouteRoot, + rc.Identifier, + teamsListID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_locations.go b/vendor/github.com/cloudflare/cloudflare-go/teams_locations.go new file mode 100644 index 0000000..b6c5830 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_locations.go @@ -0,0 +1,155 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type TeamsLocationsListResponse struct { + Response + ResultInfo `json:"result_info"` + Result []TeamsLocation `json:"result"` +} + +type TeamsLocationDetailResponse struct { + Response + Result TeamsLocation `json:"result"` +} + +type TeamsLocationNetwork struct { + ID string `json:"id"` + Network string `json:"network"` +} + +type TeamsLocation struct { + ID string `json:"id"` + Name string `json:"name"` + Networks []TeamsLocationNetwork `json:"networks"` + PolicyIDs []string `json:"policy_ids"` + Ip string `json:"ip,omitempty"` + Subdomain string `json:"doh_subdomain"` + AnonymizedLogsEnabled bool `json:"anonymized_logs_enabled"` + IPv4Destination string `json:"ipv4_destination"` + ClientDefault bool `json:"client_default"` + ECSSupport *bool `json:"ecs_support,omitempty"` + + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// TeamsLocations returns all locations within an account. 
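+//
+// Illustrative sketch, not upstream documentation ("api", "ctx" and
+// "accountID" are assumed):
+//
+//	locations, _, err := api.TeamsLocations(ctx, accountID)
+//	if err != nil {
+//		return err
+//	}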
+// +// API reference: https://api.cloudflare.com/#teams-locations-list-teams-locations +func (api *API) TeamsLocations(ctx context.Context, accountID string) ([]TeamsLocation, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/gateway/locations", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsLocation{}, ResultInfo{}, err + } + + var teamsLocationsListResponse TeamsLocationsListResponse + err = json.Unmarshal(res, &teamsLocationsListResponse) + if err != nil { + return []TeamsLocation{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsLocationsListResponse.Result, teamsLocationsListResponse.ResultInfo, nil +} + +// TeamsLocation returns a single location based on the ID. +// +// API reference: https://api.cloudflare.com/#teams-locations-teams-location-details +func (api *API) TeamsLocation(ctx context.Context, accountID, locationID string) (TeamsLocation, error) { + uri := fmt.Sprintf( + "/%s/%s/gateway/locations/%s", + AccountRouteRoot, + accountID, + locationID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsLocation{}, err + } + + var teamsLocationDetailResponse TeamsLocationDetailResponse + err = json.Unmarshal(res, &teamsLocationDetailResponse) + if err != nil { + return TeamsLocation{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsLocationDetailResponse.Result, nil +} + +// CreateTeamsLocation creates a new teams location. +// +// API reference: https://api.cloudflare.com/#teams-locations-create-teams-location +func (api *API) CreateTeamsLocation(ctx context.Context, accountID string, teamsLocation TeamsLocation) (TeamsLocation, error) { + uri := fmt.Sprintf("/%s/%s/gateway/locations", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, teamsLocation) + if err != nil { + return TeamsLocation{}, err + } + + var teamsLocationDetailResponse TeamsLocationDetailResponse + err = json.Unmarshal(res, &teamsLocationDetailResponse) + if err != nil { + return TeamsLocation{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsLocationDetailResponse.Result, nil +} + +// UpdateTeamsLocation updates an existing teams location. +// +// API reference: https://api.cloudflare.com/#teams-locations-update-teams-location +func (api *API) UpdateTeamsLocation(ctx context.Context, accountID string, teamsLocation TeamsLocation) (TeamsLocation, error) { + if teamsLocation.ID == "" { + return TeamsLocation{}, fmt.Errorf("teams location ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/gateway/locations/%s", + AccountRouteRoot, + accountID, + teamsLocation.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, teamsLocation) + if err != nil { + return TeamsLocation{}, err + } + + var teamsLocationDetailResponse TeamsLocationDetailResponse + err = json.Unmarshal(res, &teamsLocationDetailResponse) + if err != nil { + return TeamsLocation{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsLocationDetailResponse.Result, nil +} + +// DeleteTeamsLocation deletes a teams location. 
+// +// API reference: https://api.cloudflare.com/#teams-locations-delete-teams-location +func (api *API) DeleteTeamsLocation(ctx context.Context, accountID, teamsLocationID string) error { + uri := fmt.Sprintf( + "/%s/%s/gateway/locations/%s", + AccountRouteRoot, + accountID, + teamsLocationID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_proxy_endpoints.go b/vendor/github.com/cloudflare/cloudflare-go/teams_proxy_endpoints.go new file mode 100644 index 0000000..8a658e2 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_proxy_endpoints.go @@ -0,0 +1,137 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type TeamsProxyEndpointListResponse struct { + Response + ResultInfo `json:"result_info"` + Result []TeamsProxyEndpoint `json:"result"` +} +type TeamsProxyEndpointDetailResponse struct { + Response + Result TeamsProxyEndpoint `json:"result"` +} + +type TeamsProxyEndpoint struct { + ID string `json:"id"` + Name string `json:"name"` + IPs []string `json:"ips"` + Subdomain string `json:"subdomain"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// TeamsProxyEndpoint returns a single proxy endpoints within an account. +// +// API reference: https://api.cloudflare.com/#zero-trust-gateway-proxy-endpoints-proxy-endpoint-details +func (api *API) TeamsProxyEndpoint(ctx context.Context, accountID, proxyEndpointID string) (TeamsProxyEndpoint, error) { + uri := fmt.Sprintf("/%s/%s/gateway/proxy_endpoints/%s", AccountRouteRoot, accountID, proxyEndpointID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsProxyEndpoint{}, err + } + + var teamsProxyEndpointDetailResponse TeamsProxyEndpointDetailResponse + err = json.Unmarshal(res, &teamsProxyEndpointDetailResponse) + if err != nil { + return TeamsProxyEndpoint{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsProxyEndpointDetailResponse.Result, nil +} + +// TeamsProxyEndpoints returns all proxy endpoints within an account. +// +// API reference: https://api.cloudflare.com/#zero-trust-gateway-proxy-endpoints-list-proxy-endpoints +func (api *API) TeamsProxyEndpoints(ctx context.Context, accountID string) ([]TeamsProxyEndpoint, ResultInfo, error) { + uri := fmt.Sprintf("/%s/%s/gateway/proxy_endpoints", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsProxyEndpoint{}, ResultInfo{}, err + } + + var teamsProxyEndpointListResponse TeamsProxyEndpointListResponse + err = json.Unmarshal(res, &teamsProxyEndpointListResponse) + if err != nil { + return []TeamsProxyEndpoint{}, ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsProxyEndpointListResponse.Result, teamsProxyEndpointListResponse.ResultInfo, nil +} + +// CreateTeamsProxyEndpoint creates a new proxy endpoint. 
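+//
+// Illustrative sketch, not upstream documentation: the name and IP range below
+// are example values only ("api", "ctx" and "accountID" are assumed).
+//
+//	endpoint, err := api.CreateTeamsProxyEndpoint(ctx, accountID, TeamsProxyEndpoint{
+//		Name: "office",
+//		IPs:  []string{"192.0.2.0/24"},
+//	})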
+// +// API reference: https://api.cloudflare.com/#zero-trust-gateway-proxy-endpoints-create-proxy-endpoint +func (api *API) CreateTeamsProxyEndpoint(ctx context.Context, accountID string, proxyEndpoint TeamsProxyEndpoint) (TeamsProxyEndpoint, error) { + uri := fmt.Sprintf("/%s/%s/gateway/proxy_endpoints", AccountRouteRoot, accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, proxyEndpoint) + if err != nil { + return TeamsProxyEndpoint{}, err + } + + var teamsProxyEndpointDetailResponse TeamsProxyEndpointDetailResponse + err = json.Unmarshal(res, &teamsProxyEndpointDetailResponse) + if err != nil { + return TeamsProxyEndpoint{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsProxyEndpointDetailResponse.Result, nil +} + +// UpdateTeamsProxyEndpoint updates an existing teams Proxy Endpoint. +// +// API reference: https://api.cloudflare.com/#zero-trust-gateway-proxy-endpoints-update-proxy-endpoint +func (api *API) UpdateTeamsProxyEndpoint(ctx context.Context, accountID string, proxyEndpoint TeamsProxyEndpoint) (TeamsProxyEndpoint, error) { + if proxyEndpoint.ID == "" { + return TeamsProxyEndpoint{}, fmt.Errorf("Proxy Endpoint ID cannot be empty") + } + + uri := fmt.Sprintf( + "/%s/%s/gateway/proxy_endpoints/%s", + AccountRouteRoot, + accountID, + proxyEndpoint.ID, + ) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, proxyEndpoint) + if err != nil { + return TeamsProxyEndpoint{}, err + } + + var teamsProxyEndpointDetailResponse TeamsProxyEndpointDetailResponse + err = json.Unmarshal(res, &teamsProxyEndpointDetailResponse) + if err != nil { + return TeamsProxyEndpoint{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsProxyEndpointDetailResponse.Result, nil +} + +// DeleteTeamsProxyEndpoint deletes a teams Proxy Endpoint. +// +// API reference: https://api.cloudflare.com/#zero-trust-gateway-proxy-endpoints-delete-proxy-endpoint +func (api *API) DeleteTeamsProxyEndpoint(ctx context.Context, accountID, proxyEndpointID string) error { + uri := fmt.Sprintf( + "/%s/%s/gateway/proxy_endpoints/%s", + AccountRouteRoot, + accountID, + proxyEndpointID, + ) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_rules.go b/vendor/github.com/cloudflare/cloudflare-go/teams_rules.go new file mode 100644 index 0000000..38bfbce --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_rules.go @@ -0,0 +1,359 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type TeamsRuleSettings struct { + // list of ipv4 or ipv6 ips to override with, when action is set to dns override + OverrideIPs []string `json:"override_ips"` + + // show this string at block page caused by this rule + BlockReason string `json:"block_reason"` + + // host name to override with when action is set to dns override. 
Can not be used with OverrideIPs + OverrideHost string `json:"override_host"` + + // settings for browser isolation actions + BISOAdminControls *TeamsBISOAdminControlSettings `json:"biso_admin_controls"` + + // settings for l4(network) level overrides + L4Override *TeamsL4OverrideSettings `json:"l4override"` + + // settings for adding headers to http requests + AddHeaders http.Header `json:"add_headers"` + + // settings for session check in allow action + CheckSession *TeamsCheckSessionSettings `json:"check_session"` + + // Enable block page on rules with action block + BlockPageEnabled bool `json:"block_page_enabled"` + + // whether to disable dnssec validation for allow action + InsecureDisableDNSSECValidation bool `json:"insecure_disable_dnssec_validation"` + + // settings for rules with egress action + EgressSettings *EgressSettings `json:"egress"` + + // DLP payload logging configuration + PayloadLog *TeamsDlpPayloadLogSettings `json:"payload_log"` + + //AuditSsh Settings + AuditSSH *AuditSSHRuleSettings `json:"audit_ssh"` + + // Turns on ip category based filter on dns if the rule contains dns category checks + IPCategories bool `json:"ip_categories"` + + // Turns on for explicitly ignoring cname domain category matches + IgnoreCNAMECategoryMatches *bool `json:"ignore_cname_category_matches"` + + // Allow parent MSP accounts to enable bypass their children's rules. Do not set them for non MSP accounts. + AllowChildBypass *bool `json:"allow_child_bypass,omitempty"` + + // Allow child MSP accounts to bypass their parent's rules. Do not set them for non MSP accounts. + BypassParentRule *bool `json:"bypass_parent_rule,omitempty"` + + // Action taken when an untrusted origin certificate error occurs in a http allow rule + UntrustedCertSettings *UntrustedCertSettings `json:"untrusted_cert"` + + // Specifies that a resolver policy should use Cloudflare's DNS Resolver. + ResolveDnsThroughCloudflare *bool `json:"resolve_dns_through_cloudflare,omitempty"` + + // Resolver policy settings. + DnsResolverSettings *TeamsDnsResolverSettings `json:"dns_resolvers,omitempty"` + + NotificationSettings *TeamsNotificationSettings `json:"notification_settings"` +} + +type TeamsGatewayUntrustedCertAction string + +const ( + UntrustedCertPassthrough TeamsGatewayUntrustedCertAction = "pass_through" + UntrustedCertBlock TeamsGatewayUntrustedCertAction = "block" + UntrustedCertError TeamsGatewayUntrustedCertAction = "error" +) + +type UntrustedCertSettings struct { + Action TeamsGatewayUntrustedCertAction `json:"action"` +} + +type TeamsNotificationSettings struct { + Enabled *bool `json:"enabled,omitempty"` + Message string `json:"msg"` + SupportURL string `json:"support_url"` +} + +type AuditSSHRuleSettings struct { + CommandLogging bool `json:"command_logging"` +} + +type EgressSettings struct { + Ipv6Range string `json:"ipv6"` + Ipv4 string `json:"ipv4"` + Ipv4Fallback string `json:"ipv4_fallback"` +} + +// TeamsL4OverrideSettings used in l4 filter type rule with action set to override. 
+type TeamsL4OverrideSettings struct { + IP string `json:"ip,omitempty"` + Port int `json:"port,omitempty"` +} + +type TeamsBISOAdminControlSettings struct { + DisablePrinting bool `json:"dp"` + DisableCopyPaste bool `json:"dcp"` + DisableDownload bool `json:"dd"` + DisableUpload bool `json:"du"` + DisableKeyboard bool `json:"dk"` + DisableClipboardRedirection bool `json:"dcr"` +} + +type TeamsCheckSessionSettings struct { + Enforce bool `json:"enforce"` + Duration Duration `json:"duration"` +} + +type ( + TeamsDnsResolverSettings struct { + V4Resolvers []TeamsDnsResolverAddressV4 `json:"ipv4,omitempty"` + V6Resolvers []TeamsDnsResolverAddressV6 `json:"ipv6,omitempty"` + } + + TeamsDnsResolverAddressV4 struct { + TeamsDnsResolverAddress + } + + TeamsDnsResolverAddressV6 struct { + TeamsDnsResolverAddress + } + + TeamsDnsResolverAddress struct { + IP string `json:"ip"` + Port *int `json:"port,omitempty"` + VnetID string `json:"vnet_id,omitempty"` + RouteThroughPrivateNetwork *bool `json:"route_through_private_network,omitempty"` + } +) + +type TeamsDlpPayloadLogSettings struct { + Enabled bool `json:"enabled"` +} + +type TeamsFilterType string + +type TeamsGatewayAction string + +const ( + HttpFilter TeamsFilterType = "http" + DnsFilter TeamsFilterType = "dns" + L4Filter TeamsFilterType = "l4" + EgressFilter TeamsFilterType = "egress" + DnsResolverFilter TeamsFilterType = "dns_resolver" +) + +const ( + Allow TeamsGatewayAction = "allow" // dns|http|l4 + Block TeamsGatewayAction = "block" // dns|http|l4 + SafeSearch TeamsGatewayAction = "safesearch" // dns + YTRestricted TeamsGatewayAction = "ytrestricted" // dns + On TeamsGatewayAction = "on" // http + Off TeamsGatewayAction = "off" // http + Scan TeamsGatewayAction = "scan" // http + NoScan TeamsGatewayAction = "noscan" // http + Isolate TeamsGatewayAction = "isolate" // http + NoIsolate TeamsGatewayAction = "noisolate" // http + Override TeamsGatewayAction = "override" // http + L4Override TeamsGatewayAction = "l4_override" // l4 + Egress TeamsGatewayAction = "egress" // egress + AuditSSH TeamsGatewayAction = "audit_ssh" // l4 + Resolve TeamsGatewayAction = "resolve" // resolve +) + +func TeamsRulesActionValues() []string { + return []string{ + string(Allow), + string(Block), + string(SafeSearch), + string(YTRestricted), + string(On), + string(Off), + string(Scan), + string(NoScan), + string(Isolate), + string(NoIsolate), + string(Override), + string(L4Override), + string(Egress), + string(AuditSSH), + string(Resolve), + } +} + +func TeamsRulesUntrustedCertActionValues() []string { + return []string{ + string(UntrustedCertPassthrough), + string(UntrustedCertBlock), + string(UntrustedCertError), + } +} + +// TeamsRule represents an Teams wirefilter rule. +type TeamsRule struct { + ID string `json:"id,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + Precedence uint64 `json:"precedence"` + Enabled bool `json:"enabled"` + Action TeamsGatewayAction `json:"action"` + Filters []TeamsFilterType `json:"filters"` + Traffic string `json:"traffic"` + Identity string `json:"identity"` + DevicePosture string `json:"device_posture"` + Version uint64 `json:"version"` + RuleSettings TeamsRuleSettings `json:"rule_settings,omitempty"` +} + +// TeamsRuleResponse is the API response, containing a single rule. 
+type TeamsRuleResponse struct { + Response + Result TeamsRule `json:"result"` +} + +// TeamsRuleResponse is the API response, containing an array of rules. +type TeamsRulesResponse struct { + Response + Result []TeamsRule `json:"result"` +} + +// TeamsRulePatchRequest is used to patch an existing rule. +type TeamsRulePatchRequest struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Precedence uint64 `json:"precedence"` + Enabled bool `json:"enabled"` + Action TeamsGatewayAction `json:"action"` + RuleSettings TeamsRuleSettings `json:"rule_settings,omitempty"` +} + +// TeamsRules returns all rules within an account. +// +// API reference: https://api.cloudflare.com/#teams-rules-properties +func (api *API) TeamsRules(ctx context.Context, accountID string) ([]TeamsRule, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/rules", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TeamsRule{}, err + } + + var teamsRulesResponse TeamsRulesResponse + err = json.Unmarshal(res, &teamsRulesResponse) + if err != nil { + return []TeamsRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsRulesResponse.Result, nil +} + +// TeamsRule returns the rule with rule ID in the URL. +// +// API reference: https://api.cloudflare.com/#teams-rules-properties +func (api *API) TeamsRule(ctx context.Context, accountID string, ruleId string) (TeamsRule, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/rules/%s", accountID, ruleId) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TeamsRule{}, err + } + + var teamsRuleResponse TeamsRuleResponse + err = json.Unmarshal(res, &teamsRuleResponse) + if err != nil { + return TeamsRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsRuleResponse.Result, nil +} + +// TeamsCreateRule creates a rule with wirefilter expression. +// +// API reference: https://api.cloudflare.com/#teams-rules-properties +func (api *API) TeamsCreateRule(ctx context.Context, accountID string, rule TeamsRule) (TeamsRule, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/rules", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, rule) + if err != nil { + return TeamsRule{}, err + } + + var teamsRuleResponse TeamsRuleResponse + err = json.Unmarshal(res, &teamsRuleResponse) + if err != nil { + return TeamsRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsRuleResponse.Result, nil +} + +// TeamsUpdateRule updates a rule with wirefilter expression. +// +// API reference: https://api.cloudflare.com/#teams-rules-properties +func (api *API) TeamsUpdateRule(ctx context.Context, accountID string, ruleId string, rule TeamsRule) (TeamsRule, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/rules/%s", accountID, ruleId) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, rule) + if err != nil { + return TeamsRule{}, err + } + + var teamsRuleResponse TeamsRuleResponse + err = json.Unmarshal(res, &teamsRuleResponse) + if err != nil { + return TeamsRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsRuleResponse.Result, nil +} + +// TeamsPatchRule patches a rule associated values. 
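+//
+// Illustrative sketch, not upstream documentation: disables an existing rule in
+// place; which fields the PATCH endpoint requires is an assumption here
+// ("api", "ctx", "accountID" and "ruleID" are assumed).
+//
+//	rule, err := api.TeamsPatchRule(ctx, accountID, ruleID, TeamsRulePatchRequest{
+//		ID:      ruleID,
+//		Name:    "block-malware",
+//		Action:  Block,
+//		Enabled: false,
+//	})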
+// +// API reference: https://api.cloudflare.com/#teams-rules-properties +func (api *API) TeamsPatchRule(ctx context.Context, accountID string, ruleId string, rule TeamsRulePatchRequest) (TeamsRule, error) { + uri := fmt.Sprintf("/accounts/%s/gateway/rules/%s", accountID, ruleId) + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, rule) + if err != nil { + return TeamsRule{}, err + } + + var teamsRuleResponse TeamsRuleResponse + err = json.Unmarshal(res, &teamsRuleResponse) + if err != nil { + return TeamsRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return teamsRuleResponse.Result, nil +} + +// TeamsDeleteRule deletes a rule. +// +// API reference: https://api.cloudflare.com/#teams-rules-properties +func (api *API) TeamsDeleteRule(ctx context.Context, accountID string, ruleId string) error { + uri := fmt.Sprintf("/accounts/%s/gateway/rules/%s", accountID, ruleId) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/tiered_cache.go b/vendor/github.com/cloudflare/cloudflare-go/tiered_cache.go new file mode 100644 index 0000000..a6e8450 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/tiered_cache.go @@ -0,0 +1,317 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type TieredCacheType int + +const ( + TieredCacheOff TieredCacheType = 0 + TieredCacheGeneric TieredCacheType = 1 + TieredCacheSmart TieredCacheType = 2 +) + +func (e TieredCacheType) String() string { + switch e { + case TieredCacheGeneric: + return "generic" + case TieredCacheSmart: + return "smart" + case TieredCacheOff: + return "off" + default: + return fmt.Sprintf("%d", int(e)) + } +} + +type TieredCache struct { + Type TieredCacheType + LastModified time.Time +} + +// GetTieredCache allows you to retrieve the current Tiered Cache Settings for a Zone. +// This function does not support custom topologies, only Generic and Smart Tiered Caching. +// +// API Reference: https://api.cloudflare.com/#smart-tiered-cache-get-smart-tiered-cache-setting +// API Reference: https://api.cloudflare.com/#tiered-cache-get-tiered-cache-setting +func (api *API) GetTieredCache(ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + var lastModified time.Time + + generic, err := getGenericTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + lastModified = generic.LastModified + + smart, err := getSmartTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + + if smart.LastModified.After(lastModified) { + lastModified = smart.LastModified + } + + if generic.Type == TieredCacheOff { + return TieredCache{Type: TieredCacheOff, LastModified: lastModified}, nil + } + + if smart.Type == TieredCacheOff { + return TieredCache{Type: TieredCacheGeneric, LastModified: lastModified}, nil + } + + return TieredCache{Type: TieredCacheSmart, LastModified: lastModified}, nil +} + +// SetTieredCache allows you to set a zone's tiered cache topology between the available types. +// Using the value of TieredCacheOff will disable Tiered Cache entirely. 
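+//
+// Illustrative sketch, not upstream documentation: enables Smart Tiered Cache
+// for a zone ("api", "ctx" and "zoneID" are assumed, and ZoneIdentifier,
+// defined elsewhere in this package, is assumed to build the zone-level
+// *ResourceContainer).
+//
+//	setting, err := api.SetTieredCache(ctx, ZoneIdentifier(zoneID), TieredCacheSmart)
+//	if err != nil {
+//		return err
+//	}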
+// +// API Reference: https://api.cloudflare.com/#smart-tiered-cache-patch-smart-tiered-cache-setting +// API Reference: https://api.cloudflare.com/#tiered-cache-patch-tiered-cache-setting +func (api *API) SetTieredCache(ctx context.Context, rc *ResourceContainer, value TieredCacheType) (TieredCache, error) { + if value == TieredCacheOff { + return api.DeleteTieredCache(ctx, rc) + } + + var lastModified time.Time + + if value == TieredCacheGeneric { + result, err := deleteSmartTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + lastModified = result.LastModified + + result, err = enableGenericTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + + if result.LastModified.After(lastModified) { + lastModified = result.LastModified + } + return TieredCache{Type: TieredCacheGeneric, LastModified: lastModified}, nil + } + + result, err := enableGenericTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + lastModified = result.LastModified + + result, err = enableSmartTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + + if result.LastModified.After(lastModified) { + lastModified = result.LastModified + } + return TieredCache{Type: TieredCacheSmart, LastModified: lastModified}, nil +} + +// DeleteTieredCache allows you to delete the tiered cache settings for a zone. +// This is equivalent to using SetTieredCache with the value of TieredCacheOff. +// +// API Reference: https://api.cloudflare.com/#smart-tiered-cache-delete-smart-tiered-cache-setting +// API Reference: https://api.cloudflare.com/#tiered-cache-patch-tiered-cache-setting +func (api *API) DeleteTieredCache(ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + var lastModified time.Time + + result, err := deleteSmartTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + lastModified = result.LastModified + + result, err = disableGenericTieredCache(api, ctx, rc) + if err != nil { + return TieredCache{}, err + } + + if result.LastModified.After(lastModified) { + lastModified = result.LastModified + } + return TieredCache{Type: TieredCacheOff, LastModified: lastModified}, nil +} + +type tieredCacheResult struct { + ID string `json:"id"` + Value string `json:"value,omitempty"` + LastModified time.Time `json:"modified_on"` +} + +type tieredCacheResponse struct { + Result tieredCacheResult `json:"result"` + Response +} + +type tieredCacheSetting struct { + Value string `json:"value"` +} + +func getGenericTieredCache(api *API, ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + uri := fmt.Sprintf("/zones/%s/argo/tiered_caching", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + var response tieredCacheResponse + err = json.Unmarshal(res, &response) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + if !response.Success { + return TieredCache{Type: TieredCacheOff}, errors.New("request to retrieve generic tiered cache failed") + } + + if response.Result.Value == "off" { + return TieredCache{Type: TieredCacheOff, LastModified: response.Result.LastModified}, nil + } + + return TieredCache{Type: TieredCacheGeneric, LastModified: response.Result.LastModified}, nil +} + +func getSmartTieredCache(api *API, ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + uri := fmt.Sprintf("/zones/%s/cache/tiered_cache_smart_topology_enable", rc.Identifier) + res, err 
:= api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + var notFoundError *NotFoundError + if errors.As(err, ¬FoundError) { + return TieredCache{Type: TieredCacheOff}, nil + } + return TieredCache{Type: TieredCacheOff}, err + } + + var response tieredCacheResponse + err = json.Unmarshal(res, &response) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + if !response.Success { + return TieredCache{Type: TieredCacheOff}, errors.New("request to retrieve smart tiered cache failed") + } + + if response.Result.Value == "off" { + return TieredCache{Type: TieredCacheOff, LastModified: response.Result.LastModified}, nil + } + return TieredCache{Type: TieredCacheSmart, LastModified: response.Result.LastModified}, nil +} + +func enableGenericTieredCache(api *API, ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + uri := fmt.Sprintf("/zones/%s/argo/tiered_caching", rc.Identifier) + setting := tieredCacheSetting{ + Value: "on", + } + body, err := json.Marshal(setting) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, body) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + var response tieredCacheResponse + err = json.Unmarshal(res, &response) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + if !response.Success { + return TieredCache{Type: TieredCacheOff}, errors.New("request to enable generic tiered cache failed") + } + + return TieredCache{Type: TieredCacheGeneric, LastModified: response.Result.LastModified}, nil +} + +func enableSmartTieredCache(api *API, ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + uri := fmt.Sprintf("/zones/%s/cache/tiered_cache_smart_topology_enable", rc.Identifier) + setting := tieredCacheSetting{ + Value: "on", + } + body, err := json.Marshal(setting) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, body) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + var response tieredCacheResponse + err = json.Unmarshal(res, &response) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + if !response.Success { + return TieredCache{Type: TieredCacheOff}, errors.New("request to enable smart tiered cache failed") + } + + return TieredCache{Type: TieredCacheSmart, LastModified: response.Result.LastModified}, nil +} + +func disableGenericTieredCache(api *API, ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + uri := fmt.Sprintf("/zones/%s/argo/tiered_caching", rc.Identifier) + setting := tieredCacheSetting{ + Value: "off", + } + body, err := json.Marshal(setting) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, body) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + var response tieredCacheResponse + err = json.Unmarshal(res, &response) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + if !response.Success { + return TieredCache{Type: TieredCacheOff}, errors.New("request to disable generic tiered cache failed") + } + + return TieredCache{Type: TieredCacheOff, LastModified: response.Result.LastModified}, nil +} + +func deleteSmartTieredCache(api *API, ctx context.Context, rc *ResourceContainer) (TieredCache, error) { + uri := fmt.Sprintf("/zones/%s/cache/tiered_cache_smart_topology_enable", 
rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + var notFoundError *NotFoundError + if errors.As(err, ¬FoundError) { + return TieredCache{Type: TieredCacheOff}, nil + } + return TieredCache{Type: TieredCacheOff}, err + } + + var response tieredCacheResponse + err = json.Unmarshal(res, &response) + if err != nil { + return TieredCache{Type: TieredCacheOff}, err + } + + if !response.Success { + return TieredCache{Type: TieredCacheOff}, errors.New("request to disable smart tiered cache failed") + } + + return TieredCache{Type: TieredCacheOff, LastModified: response.Result.LastModified}, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/total_tls.go b/vendor/github.com/cloudflare/cloudflare-go/total_tls.go new file mode 100644 index 0000000..5ea0bdb --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/total_tls.go @@ -0,0 +1,64 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type TotalTLS struct { + Enabled *bool `json:"enabled,omitempty"` + CertificateAuthority string `json:"certificate_authority,omitempty"` + ValidityDays int `json:"validity_days,omitempty"` +} + +type TotalTLSResponse struct { + Response + Result TotalTLS `json:"result"` +} + +// GetTotalTLS Get Total TLS Settings for a Zone. +// +// API Reference: https://api.cloudflare.com/#total-tls-total-tls-settings-details +func (api *API) GetTotalTLS(ctx context.Context, rc *ResourceContainer) (TotalTLS, error) { + if rc.Identifier == "" { + return TotalTLS{}, ErrMissingZoneID + } + uri := fmt.Sprintf("/zones/%s/acm/total_tls", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TotalTLS{}, err + } + + var r TotalTLSResponse + err = json.Unmarshal(res, &r) + if err != nil { + return TotalTLS{}, err + } + + return r.Result, nil +} + +// SetTotalTLS Set Total TLS Settings or disable the feature for a Zone. +// +// API Reference: https://api.cloudflare.com/#total-tls-enable-or-disable-total-tls +func (api *API) SetTotalTLS(ctx context.Context, rc *ResourceContainer, params TotalTLS) (TotalTLS, error) { + if rc.Identifier == "" { + return TotalTLS{}, ErrMissingZoneID + } + uri := fmt.Sprintf("/zones/%s/acm/total_tls", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return TotalTLS{}, err + } + + var r TotalTLSResponse + err = json.Unmarshal(res, &r) + if err != nil { + return TotalTLS{}, err + } + + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/tunnel.go b/vendor/github.com/cloudflare/cloudflare-go/tunnel.go new file mode 100644 index 0000000..2f3f6d7 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/tunnel.go @@ -0,0 +1,536 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/goccy/go-json" +) + +// A TunnelDuration is a Duration that has custom serialization for JSON. +// JSON in Javascript assumes that int fields are 32 bits and Duration fields +// are deserialized assuming that numbers are in nanoseconds, which in 32bit +// integers limits to just 2 seconds. This type assumes that when +// serializing/deserializing from JSON, that the number is in seconds, while it +// maintains the YAML serde assumptions. 
+type TunnelDuration struct { + time.Duration +} + +func (s TunnelDuration) MarshalJSON() ([]byte, error) { + return json.Marshal(s.Duration.Seconds()) +} + +func (s *TunnelDuration) UnmarshalJSON(data []byte) error { + seconds, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return err + } + + s.Duration = time.Duration(seconds * int64(time.Second)) + return nil +} + +// ErrMissingTunnelID is for when a required tunnel ID is missing from the +// parameters. +var ErrMissingTunnelID = errors.New("required missing tunnel ID") + +// Tunnel is the struct definition of a tunnel. +type Tunnel struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Secret string `json:"tunnel_secret,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` + Connections []TunnelConnection `json:"connections,omitempty"` + ConnsActiveAt *time.Time `json:"conns_active_at,omitempty"` + ConnInactiveAt *time.Time `json:"conns_inactive_at,omitempty"` + TunnelType string `json:"tun_type,omitempty"` + Status string `json:"status,omitempty"` + RemoteConfig bool `json:"remote_config,omitempty"` +} + +// Connection is the struct definition of a connection. +type Connection struct { + ID string `json:"id,omitempty"` + Features []string `json:"features,omitempty"` + Version string `json:"version,omitempty"` + Arch string `json:"arch,omitempty"` + Connections []TunnelConnection `json:"conns,omitempty"` + RunAt *time.Time `json:"run_at,omitempty"` + ConfigVersion int `json:"config_version,omitempty"` +} + +// TunnelConnection represents the connections associated with a tunnel. +type TunnelConnection struct { + ColoName string `json:"colo_name"` + ID string `json:"id"` + IsPendingReconnect bool `json:"is_pending_reconnect"` + ClientID string `json:"client_id"` + ClientVersion string `json:"client_version"` + OpenedAt string `json:"opened_at"` + OriginIP string `json:"origin_ip"` +} + +// TunnelsDetailResponse is used for representing the API response payload for +// multiple tunnels. +type TunnelsDetailResponse struct { + Result []Tunnel `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// listTunnelsDefaultPageSize represents the default per_page size of the API. +var listTunnelsDefaultPageSize int = 100 + +// TunnelDetailResponse is used for representing the API response payload for +// a single tunnel. +type TunnelDetailResponse struct { + Result Tunnel `json:"result"` + Response +} + +// TunnelConnectionResponse is used for representing the API response payload for +// connections of a single tunnel. +type TunnelConnectionResponse struct { + Result []Connection `json:"result"` + Response +} + +type TunnelConfigurationResult struct { + TunnelID string `json:"tunnel_id,omitempty"` + Config TunnelConfiguration `json:"config,omitempty"` + Version int `json:"version,omitempty"` +} + +// TunnelConfigurationResponse is used for representing the API response payload +// for a single tunnel. +type TunnelConfigurationResponse struct { + Result TunnelConfigurationResult `json:"result"` + Response +} + +// TunnelTokenResponse is the API response for a tunnel token. 
+type TunnelTokenResponse struct { + Result string `json:"result"` + Response +} + +type TunnelCreateParams struct { + Name string `json:"name,omitempty"` + Secret string `json:"tunnel_secret,omitempty"` + ConfigSrc string `json:"config_src,omitempty"` +} + +type TunnelUpdateParams struct { + Name string `json:"name,omitempty"` + Secret string `json:"tunnel_secret,omitempty"` +} + +type UnvalidatedIngressRule struct { + Hostname string `json:"hostname,omitempty"` + Path string `json:"path,omitempty"` + Service string `json:"service,omitempty"` + OriginRequest *OriginRequestConfig `json:"originRequest,omitempty"` +} + +// OriginRequestConfig is a set of optional fields that users may set to +// customize how cloudflared sends requests to origin services. It is used to set +// up general config that apply to all rules, and also, specific per-rule +// config. +type OriginRequestConfig struct { + // HTTP proxy timeout for establishing a new connection + ConnectTimeout *TunnelDuration `json:"connectTimeout,omitempty"` + // HTTP proxy timeout for completing a TLS handshake + TLSTimeout *TunnelDuration `json:"tlsTimeout,omitempty"` + // HTTP proxy TCP keepalive duration + TCPKeepAlive *TunnelDuration `json:"tcpKeepAlive,omitempty"` + // HTTP proxy should disable "happy eyeballs" for IPv4/v6 fallback + NoHappyEyeballs *bool `json:"noHappyEyeballs,omitempty"` + // HTTP proxy maximum keepalive connection pool size + KeepAliveConnections *int `json:"keepAliveConnections,omitempty"` + // HTTP proxy timeout for closing an idle connection + KeepAliveTimeout *TunnelDuration `json:"keepAliveTimeout,omitempty"` + // Sets the HTTP Host header for the local webserver. + HTTPHostHeader *string `json:"httpHostHeader,omitempty"` + // Hostname on the origin server certificate. + OriginServerName *string `json:"originServerName,omitempty"` + // Path to the CA for the certificate of your origin. + // This option should be used only if your certificate is not signed by Cloudflare. + CAPool *string `json:"caPool,omitempty"` + // Disables TLS verification of the certificate presented by your origin. + // Will allow any certificate from the origin to be accepted. + // Note: The connection from your machine to Cloudflare's Edge is still encrypted. + NoTLSVerify *bool `json:"noTLSVerify,omitempty"` + // Disables chunked transfer encoding. + // Useful if you are running a WSGI server. + DisableChunkedEncoding *bool `json:"disableChunkedEncoding,omitempty"` + // Runs as jump host + BastionMode *bool `json:"bastionMode,omitempty"` + // Listen address for the proxy. + ProxyAddress *string `json:"proxyAddress,omitempty"` + // Listen port for the proxy. + ProxyPort *uint `json:"proxyPort,omitempty"` + // Valid options are 'socks' or empty. + ProxyType *string `json:"proxyType,omitempty"` + // IP rules for the proxy service + IPRules []IngressIPRule `json:"ipRules,omitempty"` + // Attempt to connect to origin with HTTP/2 + Http2Origin *bool `json:"http2Origin,omitempty"` + // Access holds all access related configs + Access *AccessConfig `json:"access,omitempty"` +} + +type AccessConfig struct { + // Required when set to true will fail every request that does not arrive + // through an access authenticated endpoint. + Required bool `yaml:"required" json:"required,omitempty"` + // TeamName is the organization team name to get the public key certificates for. + TeamName string `yaml:"teamName" json:"teamName"` + // AudTag is the AudTag to verify access JWT against. 
+ AudTag []string `yaml:"audTag" json:"audTag"` +} + +type IngressIPRule struct { + Prefix *string `json:"prefix,omitempty"` + Ports []int `json:"ports,omitempty"` + Allow bool `json:"allow,omitempty"` +} + +type TunnelConfiguration struct { + Ingress []UnvalidatedIngressRule `json:"ingress,omitempty"` + WarpRouting *WarpRoutingConfig `json:"warp-routing,omitempty"` + OriginRequest OriginRequestConfig `json:"originRequest,omitempty"` +} + +type WarpRoutingConfig struct { + Enabled bool `json:"enabled,omitempty"` +} + +type TunnelConfigurationParams struct { + TunnelID string `json:"-"` + Config TunnelConfiguration `json:"config,omitempty"` +} + +type TunnelListParams struct { + Name string `url:"name,omitempty"` + UUID string `url:"uuid,omitempty"` // the tunnel ID + IsDeleted *bool `url:"is_deleted,omitempty"` + ExistedAt *time.Time `url:"existed_at,omitempty"` + IncludePrefix string `url:"include_prefix,omitempty"` + ExcludePrefix string `url:"exclude_prefix,omitempty"` + + ResultInfo +} + +// ListTunnels lists all tunnels. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-list-cloudflare-tunnels +func (api *API) ListTunnels(ctx context.Context, rc *ResourceContainer, params TunnelListParams) ([]Tunnel, *ResultInfo, error) { + if rc.Identifier == "" { + return []Tunnel{}, &ResultInfo{}, ErrMissingAccountID + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = listTunnelsDefaultPageSize + } + + if params.Page < 1 { + params.Page = 1 + } + + var records []Tunnel + var listResponse TunnelsDetailResponse + + for { + uri := buildURI(fmt.Sprintf("/accounts/%s/cfd_tunnel", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Tunnel{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &listResponse) + if err != nil { + return []Tunnel{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + records = append(records, listResponse.Result...) + params.ResultInfo = listResponse.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return records, &listResponse.ResultInfo, nil +} + +// GetTunnel returns a single Argo tunnel. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-get-cloudflare-tunnel +func (api *API) GetTunnel(ctx context.Context, rc *ResourceContainer, tunnelID string) (Tunnel, error) { + if rc.Identifier == "" { + return Tunnel{}, ErrMissingAccountID + } + + if tunnelID == "" { + return Tunnel{}, errors.New("missing tunnel ID") + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s", rc.Identifier, tunnelID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Tunnel{}, err + } + + var argoDetailsResponse TunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return Tunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// CreateTunnel creates a new tunnel for the account. 
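+//
+// Illustrative sketch, not upstream documentation: "secret" is assumed to be a
+// base64-encoded random value supplied by the caller, and "api", "ctx",
+// "accountID" and the AccountIdentifier helper are assumed.
+//
+//	tunnel, err := api.CreateTunnel(ctx, AccountIdentifier(accountID), TunnelCreateParams{
+//		Name:   "example-tunnel",
+//		Secret: secret,
+//	})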
+// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-create-cloudflare-tunnel +func (api *API) CreateTunnel(ctx context.Context, rc *ResourceContainer, params TunnelCreateParams) (Tunnel, error) { + if rc.Identifier == "" { + return Tunnel{}, ErrMissingAccountID + } + + if params.Name == "" { + return Tunnel{}, errors.New("missing tunnel name") + } + + if params.Secret == "" { + return Tunnel{}, errors.New("missing tunnel secret") + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return Tunnel{}, err + } + + var argoDetailsResponse TunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return Tunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return argoDetailsResponse.Result, nil +} + +// UpdateTunnel updates an existing tunnel for the account. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-update-cloudflare-tunnel +func (api *API) UpdateTunnel(ctx context.Context, rc *ResourceContainer, params TunnelUpdateParams) (Tunnel, error) { + if rc.Identifier == "" { + return Tunnel{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel", rc.Identifier) + + var tunnel Tunnel + + if params.Name != "" { + tunnel.Name = params.Name + } + + if params.Secret != "" { + tunnel.Secret = params.Secret + } + + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, tunnel) + if err != nil { + return Tunnel{}, err + } + + var argoDetailsResponse TunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return Tunnel{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return argoDetailsResponse.Result, nil +} + +// UpdateTunnelConfiguration updates an existing tunnel for the account. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-configuration-properties +func (api *API) UpdateTunnelConfiguration(ctx context.Context, rc *ResourceContainer, params TunnelConfigurationParams) (TunnelConfigurationResult, error) { + if rc.Identifier == "" { + return TunnelConfigurationResult{}, ErrMissingAccountID + } + + if params.TunnelID == "" { + return TunnelConfigurationResult{}, ErrMissingTunnelID + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s/configurations", rc.Identifier, params.TunnelID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return TunnelConfigurationResult{}, err + } + + var tunnelDetailsResponse TunnelConfigurationResponse + err = json.Unmarshal(res, &tunnelDetailsResponse) + if err != nil { + return TunnelConfigurationResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + var tunnelDetails TunnelConfigurationResult + + tunnelDetails.Config = tunnelDetailsResponse.Result.Config + tunnelDetails.TunnelID = tunnelDetailsResponse.Result.TunnelID + tunnelDetails.Version = tunnelDetailsResponse.Result.Version + + return tunnelDetails, nil +} + +// GetTunnelConfiguration updates an existing tunnel for the account. 
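+//
+// Illustrative sketch, not upstream documentation, of reading a tunnel's
+// current remotely managed configuration ("api", "ctx", "accountID",
+// "tunnelID" and the AccountIdentifier helper are assumed):
+//
+//	result, err := api.GetTunnelConfiguration(ctx, AccountIdentifier(accountID), tunnelID)
+//	if err != nil {
+//		return err
+//	}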
+// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-configuration-properties +func (api *API) GetTunnelConfiguration(ctx context.Context, rc *ResourceContainer, tunnelID string) (TunnelConfigurationResult, error) { + if rc.Identifier == "" { + return TunnelConfigurationResult{}, ErrMissingAccountID + } + + if tunnelID == "" { + return TunnelConfigurationResult{}, ErrMissingTunnelID + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s/configurations", rc.Identifier, tunnelID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TunnelConfigurationResult{}, err + } + + var tunnelDetailsResponse TunnelConfigurationResponse + err = json.Unmarshal(res, &tunnelDetailsResponse) + if err != nil { + return TunnelConfigurationResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + var tunnelDetails TunnelConfigurationResult + + tunnelDetails.Config = tunnelDetailsResponse.Result.Config + tunnelDetails.TunnelID = tunnelDetailsResponse.Result.TunnelID + tunnelDetails.Version = tunnelDetailsResponse.Result.Version + + return tunnelDetails, nil +} + +// ListTunnelConnections gets all connections on a tunnel. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-list-cloudflare-tunnel-connections +func (api *API) ListTunnelConnections(ctx context.Context, rc *ResourceContainer, tunnelID string) ([]Connection, error) { + if rc.Identifier == "" { + return []Connection{}, ErrMissingAccountID + } + + if tunnelID == "" { + return []Connection{}, ErrMissingTunnelID + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s/connections", rc.Identifier, tunnelID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Connection{}, err + } + + var argoDetailsResponse TunnelConnectionResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return []Connection{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return argoDetailsResponse.Result, nil +} + +// DeleteTunnel removes a single Argo tunnel. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-delete-cloudflare-tunnel +func (api *API) DeleteTunnel(ctx context.Context, rc *ResourceContainer, tunnelID string) error { + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s", rc.Identifier, tunnelID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var argoDetailsResponse TunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// CleanupTunnelConnections deletes any inactive connections on a tunnel. +// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-clean-up-cloudflare-tunnel-connections +func (api *API) CleanupTunnelConnections(ctx context.Context, rc *ResourceContainer, tunnelID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if tunnelID == "" { + return errors.New("missing tunnel ID") + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s/connections", rc.Identifier, tunnelID) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var argoDetailsResponse TunnelDetailResponse + err = json.Unmarshal(res, &argoDetailsResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// GetTunnelToken that allows to run a tunnel. 
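+//
+// Illustrative usage only; the returned token is the credential that
+// cloudflared uses to run the tunnel (rc and tunnelID are caller-side
+// placeholders):
+//
+//	token, err := api.GetTunnelToken(ctx, rc, tunnelID)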
+// +// API reference: https://api.cloudflare.com/#cloudflare-tunnel-get-cloudflare-tunnel-token +func (api *API) GetTunnelToken(ctx context.Context, rc *ResourceContainer, tunnelID string) (string, error) { + if rc.Identifier == "" { + return "", ErrMissingAccountID + } + + if tunnelID == "" { + return "", errors.New("missing tunnel ID") + } + + uri := fmt.Sprintf("/accounts/%s/cfd_tunnel/%s/token", rc.Identifier, tunnelID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return "", err + } + + var tunnelTokenResponse TunnelTokenResponse + err = json.Unmarshal(res, &tunnelTokenResponse) + if err != nil { + return "", fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return tunnelTokenResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/tunnel_routes.go b/vendor/github.com/cloudflare/cloudflare-go/tunnel_routes.go new file mode 100644 index 0000000..c67925e --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/tunnel_routes.go @@ -0,0 +1,214 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingNetwork = errors.New("missing required network parameter") + ErrInvalidNetworkValue = errors.New("invalid IP parameter. Cannot use CIDR ranges for this endpoint.") +) + +// TunnelRoute is the full record for a route. +type TunnelRoute struct { + Network string `json:"network"` + TunnelID string `json:"tunnel_id"` + TunnelName string `json:"tunnel_name"` + Comment string `json:"comment"` + CreatedAt *time.Time `json:"created_at"` + DeletedAt *time.Time `json:"deleted_at"` + VirtualNetworkID string `json:"virtual_network_id"` +} + +type TunnelRoutesListParams struct { + TunnelID string `url:"tunnel_id,omitempty"` + Comment string `url:"comment,omitempty"` + IsDeleted *bool `url:"is_deleted,omitempty"` + NetworkSubset string `url:"network_subset,omitempty"` + NetworkSuperset string `url:"network_superset,omitempty"` + ExistedAt *time.Time `url:"existed_at,omitempty"` + VirtualNetworkID string `url:"virtual_network_id,omitempty"` + PaginationOptions +} + +type TunnelRoutesCreateParams struct { + Network string `json:"-"` + TunnelID string `json:"tunnel_id"` + Comment string `json:"comment,omitempty"` + VirtualNetworkID string `json:"virtual_network_id,omitempty"` +} + +type TunnelRoutesUpdateParams struct { + Network string `json:"network"` + TunnelID string `json:"tunnel_id"` + Comment string `json:"comment,omitempty"` + VirtualNetworkID string `json:"virtual_network_id,omitempty"` +} + +type TunnelRoutesForIPParams struct { + Network string `url:"-"` + VirtualNetworkID string `url:"virtual_network_id,omitempty"` +} + +type TunnelRoutesDeleteParams struct { + Network string `url:"-"` + VirtualNetworkID string `url:"virtual_network_id,omitempty"` +} + +// tunnelRouteListResponse is the API response for listing tunnel routes. +type tunnelRouteListResponse struct { + Response + Result []TunnelRoute `json:"result"` +} + +type tunnelRouteResponse struct { + Response + Result TunnelRoute `json:"result"` +} + +// ListTunnelRoutes lists all defined routes for tunnels in the account. 
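+//
+// Illustrative usage only; an empty params value lists every route for the
+// account (rc is a caller-side account ResourceContainer):
+//
+//	routes, err := api.ListTunnelRoutes(ctx, rc, cloudflare.TunnelRoutesListParams{})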
+// +// See: https://api.cloudflare.com/#tunnel-route-list-tunnel-routes +func (api *API) ListTunnelRoutes(ctx context.Context, rc *ResourceContainer, params TunnelRoutesListParams) ([]TunnelRoute, error) { + if rc.Identifier == "" { + return []TunnelRoute{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/teamnet/routes", AccountRouteRoot, rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []TunnelRoute{}, err + } + + var resp tunnelRouteListResponse + err = json.Unmarshal(res, &resp) + if err != nil { + return []TunnelRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return resp.Result, nil +} + +// GetTunnelRouteForIP finds the Tunnel Route that encompasses the given IP. +// +// See: https://api.cloudflare.com/#tunnel-route-get-tunnel-route-by-ip +func (api *API) GetTunnelRouteForIP(ctx context.Context, rc *ResourceContainer, params TunnelRoutesForIPParams) (TunnelRoute, error) { + if rc.Identifier == "" { + return TunnelRoute{}, ErrMissingAccountID + } + + if params.Network == "" { + return TunnelRoute{}, ErrMissingNetwork + } + + if strings.Contains(params.Network, "/") { + return TunnelRoute{}, ErrInvalidNetworkValue + } + + uri := buildURI(fmt.Sprintf("/%s/%s/teamnet/routes/ip/%s", AccountRouteRoot, rc.Identifier, params.Network), params) + + responseBody, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TunnelRoute{}, err + } + + var routeResponse tunnelRouteResponse + err = json.Unmarshal(responseBody, &routeResponse) + if err != nil { + return TunnelRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return routeResponse.Result, nil +} + +// CreateTunnelRoute add a new route to the account routing table for the given +// tunnel. +// +// See: https://api.cloudflare.com/#tunnel-route-create-route +func (api *API) CreateTunnelRoute(ctx context.Context, rc *ResourceContainer, params TunnelRoutesCreateParams) (TunnelRoute, error) { + if rc.Identifier == "" { + return TunnelRoute{}, ErrMissingAccountID + } + + if params.Network == "" { + return TunnelRoute{}, ErrMissingNetwork + } + + uri := fmt.Sprintf("/%s/%s/teamnet/routes/network/%s", AccountRouteRoot, rc.Identifier, url.PathEscape(params.Network)) + + responseBody, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return TunnelRoute{}, err + } + + var routeResponse tunnelRouteResponse + err = json.Unmarshal(responseBody, &routeResponse) + if err != nil { + return TunnelRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return routeResponse.Result, nil +} + +// DeleteTunnelRoute delete an existing route from the account routing table. +// +// See: https://api.cloudflare.com/#tunnel-route-delete-route +func (api *API) DeleteTunnelRoute(ctx context.Context, rc *ResourceContainer, params TunnelRoutesDeleteParams) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if params.Network == "" { + return ErrMissingNetwork + } + + // Cannot fully utilize buildURI here because it tries to escape "%" sign + // from the already escaped "/" sign from Network field. 
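+	// buildURI("", params) below therefore renders only the query string (such as
+	// an optional virtual_network_id), which is appended to the pre-escaped path.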
+ uri := fmt.Sprintf("/%s/%s/teamnet/routes/network/%s%s", AccountRouteRoot, rc.Identifier, url.PathEscape(params.Network), buildURI("", params)) + + responseBody, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var routeResponse tunnelRouteResponse + err = json.Unmarshal(responseBody, &routeResponse) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// UpdateTunnelRoute updates an existing route in the account routing table for +// the given tunnel. +// +// See: https://api.cloudflare.com/#tunnel-route-update-route +func (api *API) UpdateTunnelRoute(ctx context.Context, rc *ResourceContainer, params TunnelRoutesUpdateParams) (TunnelRoute, error) { + if rc.Identifier == "" { + return TunnelRoute{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/%s/%s/teamnet/routes/network/%s", AccountRouteRoot, rc.Identifier, url.PathEscape(params.Network)) + + responseBody, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return TunnelRoute{}, err + } + + var routeResponse tunnelRouteResponse + err = json.Unmarshal(responseBody, &routeResponse) + if err != nil { + return TunnelRoute{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return routeResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/tunnel_virtual_networks.go b/vendor/github.com/cloudflare/cloudflare-go/tunnel_virtual_networks.go new file mode 100644 index 0000000..26a6875 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/tunnel_virtual_networks.go @@ -0,0 +1,159 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ErrMissingVnetName = errors.New("required missing virtual network name") + +// TunnelVirtualNetwork is segregation of Tunnel IP Routes via Virtualized +// Networks to handle overlapping private IPs in your origins. +type TunnelVirtualNetwork struct { + ID string `json:"id"` + Name string `json:"name"` + IsDefaultNetwork bool `json:"is_default_network"` + Comment string `json:"comment"` + CreatedAt *time.Time `json:"created_at"` + DeletedAt *time.Time `json:"deleted_at"` +} + +type TunnelVirtualNetworksListParams struct { + ID string `url:"id,omitempty"` + Name string `url:"name,omitempty"` + IsDefault *bool `url:"is_default,omitempty"` + IsDeleted *bool `url:"is_deleted,omitempty"` + + PaginationOptions +} + +type TunnelVirtualNetworkCreateParams struct { + Name string `json:"name"` + Comment string `json:"comment"` + IsDefault bool `json:"is_default"` +} + +type TunnelVirtualNetworkUpdateParams struct { + VnetID string `json:"-"` + Name string `json:"name,omitempty"` + Comment string `json:"comment,omitempty"` + IsDefaultNetwork *bool `json:"is_default_network,omitempty"` +} + +// tunnelRouteListResponse is the API response for listing tunnel virtual +// networks. +type tunnelVirtualNetworkListResponse struct { + Response + Result []TunnelVirtualNetwork `json:"result"` +} + +type tunnelVirtualNetworkResponse struct { + Response + Result TunnelVirtualNetwork `json:"result"` +} + +// ListTunnelVirtualNetworks lists all defined virtual networks for tunnels in +// the account. 
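+//
+// Illustrative usage only; the filter fields may be left zero to list every
+// virtual network (rc is a caller-side account ResourceContainer):
+//
+//	vnets, err := api.ListTunnelVirtualNetworks(ctx, rc, cloudflare.TunnelVirtualNetworksListParams{})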
+// +// API reference: https://api.cloudflare.com/#tunnel-virtual-network-list-virtual-networks +func (api *API) ListTunnelVirtualNetworks(ctx context.Context, rc *ResourceContainer, params TunnelVirtualNetworksListParams) ([]TunnelVirtualNetwork, error) { + if rc.Identifier == "" { + return []TunnelVirtualNetwork{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/teamnet/virtual_networks", AccountRouteRoot, rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, params) + if err != nil { + return []TunnelVirtualNetwork{}, err + } + + var resp tunnelVirtualNetworkListResponse + err = json.Unmarshal(res, &resp) + if err != nil { + return []TunnelVirtualNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return resp.Result, nil +} + +// CreateTunnelVirtualNetwork adds a new virtual network to the account. +// +// API reference: https://api.cloudflare.com/#tunnel-virtual-network-create-virtual-network +func (api *API) CreateTunnelVirtualNetwork(ctx context.Context, rc *ResourceContainer, params TunnelVirtualNetworkCreateParams) (TunnelVirtualNetwork, error) { + if rc.Identifier == "" { + return TunnelVirtualNetwork{}, ErrMissingAccountID + } + + if params.Name == "" { + return TunnelVirtualNetwork{}, ErrMissingVnetName + } + + uri := fmt.Sprintf("/%s/%s/teamnet/virtual_networks", AccountRouteRoot, rc.Identifier) + + responseBody, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return TunnelVirtualNetwork{}, err + } + + var resp tunnelVirtualNetworkResponse + err = json.Unmarshal(responseBody, &resp) + if err != nil { + return TunnelVirtualNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return resp.Result, nil +} + +// DeleteTunnelVirtualNetwork deletes an existing virtual network from the +// account. +// +// API reference: https://api.cloudflare.com/#tunnel-virtual-network-delete-virtual-network +func (api *API) DeleteTunnelVirtualNetwork(ctx context.Context, rc *ResourceContainer, vnetID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf("/%s/%s/teamnet/virtual_networks/%s", AccountRouteRoot, rc.Identifier, vnetID) + + responseBody, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + var resp tunnelVirtualNetworkResponse + err = json.Unmarshal(responseBody, &resp) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// UpdateTunnelRoute updates an existing virtual network in the account. 
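+//
+// Illustrative usage only; vnetID and the new name are caller-side placeholders:
+//
+//	vnet, err := api.UpdateTunnelVirtualNetwork(ctx, rc, cloudflare.TunnelVirtualNetworkUpdateParams{
+//		VnetID: vnetID,
+//		Name:   "staging-vnet",
+//	})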
+// +// API reference: https://api.cloudflare.com/#tunnel-virtual-network-update-virtual-network +func (api *API) UpdateTunnelVirtualNetwork(ctx context.Context, rc *ResourceContainer, params TunnelVirtualNetworkUpdateParams) (TunnelVirtualNetwork, error) { + if rc.Identifier == "" { + return TunnelVirtualNetwork{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/%s/%s/teamnet/virtual_networks/%s", AccountRouteRoot, rc.Identifier, params.VnetID) + + responseBody, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return TunnelVirtualNetwork{}, err + } + + var resp tunnelVirtualNetworkResponse + err = json.Unmarshal(responseBody, &resp) + if err != nil { + return TunnelVirtualNetwork{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return resp.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/turnstile.go b/vendor/github.com/cloudflare/cloudflare-go/turnstile.go new file mode 100644 index 0000000..5bc9273 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/turnstile.go @@ -0,0 +1,245 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ErrMissingSiteKey = errors.New("required site key missing") + +type TurnstileWidget struct { + SiteKey string `json:"sitekey,omitempty"` + Secret string `json:"secret,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + Name string `json:"name,omitempty"` + Domains []string `json:"domains,omitempty"` + Mode string `json:"mode,omitempty"` + BotFightMode bool `json:"bot_fight_mode,omitempty"` + Region string `json:"region,omitempty"` + OffLabel bool `json:"offlabel,omitempty"` +} + +type CreateTurnstileWidgetParams struct { + Name string `json:"name,omitempty"` + Domains []string `json:"domains,omitempty"` + Mode string `json:"mode,omitempty"` + BotFightMode bool `json:"bot_fight_mode,omitempty"` + Region string `json:"region,omitempty"` + OffLabel bool `json:"offlabel,omitempty"` +} + +type UpdateTurnstileWidgetParams struct { + SiteKey string `json:"-"` + Name string `json:"name,omitempty"` + Domains []string `json:"domains,omitempty"` + Mode string `json:"mode,omitempty"` + BotFightMode bool `json:"bot_fight_mode,omitempty"` + Region string `json:"region,omitempty"` + OffLabel bool `json:"offlabel,omitempty"` +} + +type TurnstileWidgetResponse struct { + Response + Result TurnstileWidget `json:"result"` +} + +type ListTurnstileWidgetParams struct { + ResultInfo + Direction string `url:"direction,omitempty"` + Order OrderDirection `url:"order,omitempty"` +} + +type ListTurnstileWidgetResponse struct { + Response + ResultInfo `json:"result_info"` + Result []TurnstileWidget `json:"result"` +} + +type RotateTurnstileWidgetParams struct { + SiteKey string `json:"-"` + InvalidateImmediately bool `json:"invalidate_immediately,omitempty"` +} + +// CreateTurnstileWidget creates a new challenge widgets. 
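+//
+// Illustrative usage only; the name, domain and mode values below are
+// placeholders:
+//
+//	widget, err := api.CreateTurnstileWidget(ctx, rc, cloudflare.CreateTurnstileWidgetParams{
+//		Name:    "example-widget",
+//		Domains: []string{"example.com"},
+//		Mode:    "managed",
+//	})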
+// +// API reference: https://api.cloudflare.com/#challenge-widgets-properties +func (api *API) CreateTurnstileWidget(ctx context.Context, rc *ResourceContainer, params CreateTurnstileWidgetParams) (TurnstileWidget, error) { + if rc.Identifier == "" { + return TurnstileWidget{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/challenges/widgets", rc.Identifier) + res, err := api.makeRequestContext(ctx, "POST", uri, params) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r TurnstileWidgetResponse + err = json.Unmarshal(res, &r) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// ListTurnstileWidgets lists challenge widgets. +// +// API reference: https://api.cloudflare.com/#challenge-widgets-list-challenge-widgets +func (api *API) ListTurnstileWidgets(ctx context.Context, rc *ResourceContainer, params ListTurnstileWidgetParams) ([]TurnstileWidget, *ResultInfo, error) { + if rc.Identifier == "" { + return []TurnstileWidget{}, &ResultInfo{}, ErrMissingAccountID + } + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if params.PerPage < 1 { + params.PerPage = 25 + } + + if params.Page < 1 { + params.Page = 1 + } + + var widgets []TurnstileWidget + var r ListTurnstileWidgetResponse + for { + uri := buildURI(fmt.Sprintf("/accounts/%s/challenges/widgets", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + if err != nil { + return []TurnstileWidget{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + err = json.Unmarshal(res, &r) + if err != nil { + return []TurnstileWidget{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + widgets = append(widgets, r.Result...) + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return widgets, &r.ResultInfo, nil +} + +// GetTurnstileWidget shows a single challenge widget configuration. +// +// API reference: https://api.cloudflare.com/#challenge-widgets-challenge-widget-details +func (api *API) GetTurnstileWidget(ctx context.Context, rc *ResourceContainer, siteKey string) (TurnstileWidget, error) { + if rc.Identifier == "" { + return TurnstileWidget{}, ErrMissingAccountID + } + + if siteKey == "" { + return TurnstileWidget{}, ErrMissingSiteKey + } + + uri := fmt.Sprintf("/accounts/%s/challenges/widgets/%s", rc.Identifier, siteKey) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r TurnstileWidgetResponse + err = json.Unmarshal(res, &r) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateTurnstileWidget update the configuration of a widget. 
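+//
+// Illustrative usage only; SiteKey selects the widget and is sent in the URL
+// rather than the JSON body (the values below are placeholders):
+//
+//	widget, err := api.UpdateTurnstileWidget(ctx, rc, cloudflare.UpdateTurnstileWidgetParams{
+//		SiteKey: siteKey,
+//		Name:    "renamed-widget",
+//		Domains: []string{"example.com"},
+//		Mode:    "managed",
+//	})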
+// +// API reference: https://api.cloudflare.com/#challenge-widgets-update-a-challenge-widget +func (api *API) UpdateTurnstileWidget(ctx context.Context, rc *ResourceContainer, params UpdateTurnstileWidgetParams) (TurnstileWidget, error) { + if rc.Identifier == "" { + return TurnstileWidget{}, ErrMissingAccountID + } + + if params.SiteKey == "" { + return TurnstileWidget{}, ErrMissingSiteKey + } + + uri := fmt.Sprintf("/accounts/%s/challenges/widgets/%s", rc.Identifier, params.SiteKey) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r TurnstileWidgetResponse + err = json.Unmarshal(res, &r) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// RotateTurnstileWidget generates a new secret key for this widget. If +// invalidate_immediately is set to false, the previous secret remains valid for +// 2 hours. +// +// Note that secrets cannot be rotated again during the grace period. +// +// API reference: https://api.cloudflare.com/#challenge-widgets-rotate-secret-for-a-challenge-widget +func (api *API) RotateTurnstileWidget(ctx context.Context, rc *ResourceContainer, param RotateTurnstileWidgetParams) (TurnstileWidget, error) { + if rc.Identifier == "" { + return TurnstileWidget{}, ErrMissingAccountID + } + if param.SiteKey == "" { + return TurnstileWidget{}, ErrMissingSiteKey + } + + uri := fmt.Sprintf("/accounts/%s/challenges/widgets/%s/rotate_secret", rc.Identifier, param.SiteKey) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, param) + + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r TurnstileWidgetResponse + err = json.Unmarshal(res, &r) + if err != nil { + return TurnstileWidget{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteTurnstileWidget delete a challenge widget. +// +// API reference: https://api.cloudflare.com/#challenge-widgets-delete-a-challenge-widget +func (api *API) DeleteTurnstileWidget(ctx context.Context, rc *ResourceContainer, siteKey string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if siteKey == "" { + return ErrMissingSiteKey + } + uri := fmt.Sprintf("/accounts/%s/challenges/widgets/%s", rc.Identifier, siteKey) + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r TurnstileWidgetResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/universal_ssl.go b/vendor/github.com/cloudflare/cloudflare-go/universal_ssl.go new file mode 100644 index 0000000..a0b7a1b --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/universal_ssl.go @@ -0,0 +1,108 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// UniversalSSLSetting represents a universal ssl setting's properties. +type UniversalSSLSetting struct { + Enabled bool `json:"enabled"` +} + +type universalSSLSettingResponse struct { + Response + Result UniversalSSLSetting `json:"result"` +} + +// UniversalSSLVerificationDetails represents a universal ssl verification's properties. 
+type UniversalSSLVerificationDetails struct { + CertificateStatus string `json:"certificate_status"` + VerificationType string `json:"verification_type"` + ValidationMethod string `json:"validation_method"` + CertPackUUID string `json:"cert_pack_uuid"` + VerificationStatus bool `json:"verification_status"` + BrandCheck bool `json:"brand_check"` + VerificationInfo []SSLValidationRecord `json:"verification_info"` +} + +type universalSSLVerificationResponse struct { + Response + Result []UniversalSSLVerificationDetails `json:"result"` +} + +type UniversalSSLCertificatePackValidationMethodSetting struct { + ValidationMethod string `json:"validation_method"` +} + +type universalSSLCertificatePackValidationMethodSettingResponse struct { + Response + Result UniversalSSLCertificatePackValidationMethodSetting `json:"result"` +} + +// UniversalSSLSettingDetails returns the details for a universal ssl setting +// +// API reference: https://api.cloudflare.com/#universal-ssl-settings-for-a-zone-universal-ssl-settings-details +func (api *API) UniversalSSLSettingDetails(ctx context.Context, zoneID string) (UniversalSSLSetting, error) { + uri := fmt.Sprintf("/zones/%s/ssl/universal/settings", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return UniversalSSLSetting{}, err + } + var r universalSSLSettingResponse + if err := json.Unmarshal(res, &r); err != nil { + return UniversalSSLSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// EditUniversalSSLSetting edits the universal ssl setting for a zone +// +// API reference: https://api.cloudflare.com/#universal-ssl-settings-for-a-zone-edit-universal-ssl-settings +func (api *API) EditUniversalSSLSetting(ctx context.Context, zoneID string, setting UniversalSSLSetting) (UniversalSSLSetting, error) { + uri := fmt.Sprintf("/zones/%s/ssl/universal/settings", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, setting) + if err != nil { + return UniversalSSLSetting{}, err + } + var r universalSSLSettingResponse + if err := json.Unmarshal(res, &r); err != nil { + return UniversalSSLSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UniversalSSLVerificationDetails returns the details for a universal ssl verification +// +// API reference: https://api.cloudflare.com/#ssl-verification-ssl-verification-details +func (api *API) UniversalSSLVerificationDetails(ctx context.Context, zoneID string) ([]UniversalSSLVerificationDetails, error) { + uri := fmt.Sprintf("/zones/%s/ssl/verification", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []UniversalSSLVerificationDetails{}, err + } + var r universalSSLVerificationResponse + if err := json.Unmarshal(res, &r); err != nil { + return []UniversalSSLVerificationDetails{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateUniversalSSLCertificatePackValidationMethod changes the validation method for a certificate pack +// +// API reference: https://api.cloudflare.com/#ssl-verification-ssl-verification-details +func (api *API) UpdateUniversalSSLCertificatePackValidationMethod(ctx context.Context, zoneID string, certPackUUID string, setting UniversalSSLCertificatePackValidationMethodSetting) (UniversalSSLCertificatePackValidationMethodSetting, error) { + uri := fmt.Sprintf("/zones/%s/ssl/verification/%s", zoneID, certPackUUID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, 
setting) + if err != nil { + return UniversalSSLCertificatePackValidationMethodSetting{}, err + } + var r universalSSLCertificatePackValidationMethodSettingResponse + if err := json.Unmarshal(res, &r); err != nil { + return UniversalSSLCertificatePackValidationMethodSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/url_normalization_settings.go b/vendor/github.com/cloudflare/cloudflare-go/url_normalization_settings.go new file mode 100644 index 0000000..c919100 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/url_normalization_settings.go @@ -0,0 +1,60 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type URLNormalizationSettings struct { + Type string `json:"type"` + Scope string `json:"scope"` +} + +type URLNormalizationSettingsResponse struct { + Result URLNormalizationSettings `json:"result"` + Response +} + +type URLNormalizationSettingsUpdateParams struct { + Type string `json:"type"` + Scope string `json:"scope"` +} + +// URLNormalizationSettings API reference: https://api.cloudflare.com/#url-normalization-get-url-normalization-settings +func (api *API) URLNormalizationSettings(ctx context.Context, rc *ResourceContainer) (URLNormalizationSettings, error) { + uri := fmt.Sprintf("/zones/%s/url_normalization", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return URLNormalizationSettings{}, err + } + + var urlNormalizationSettingsResponse URLNormalizationSettingsResponse + err = json.Unmarshal(res, &urlNormalizationSettingsResponse) + if err != nil { + return URLNormalizationSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return urlNormalizationSettingsResponse.Result, nil +} + +// UpdateURLNormalizationSettings https://api.cloudflare.com/#url-normalization-update-url-normalization-settings +func (api *API) UpdateURLNormalizationSettings(ctx context.Context, rc *ResourceContainer, params URLNormalizationSettingsUpdateParams) (URLNormalizationSettings, error) { + uri := fmt.Sprintf("/zones/%s/url_normalization", rc.Identifier) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return URLNormalizationSettings{}, err + } + + var urlNormalizationSettingsResponse URLNormalizationSettingsResponse + err = json.Unmarshal(res, &urlNormalizationSettingsResponse) + if err != nil { + return URLNormalizationSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return urlNormalizationSettingsResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/user.go b/vendor/github.com/cloudflare/cloudflare-go/user.go new file mode 100644 index 0000000..90feb89 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/user.go @@ -0,0 +1,161 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// User describes a user account. 
+type User struct { + ID string `json:"id,omitempty"` + Email string `json:"email,omitempty"` + FirstName string `json:"first_name,omitempty"` + LastName string `json:"last_name,omitempty"` + Username string `json:"username,omitempty"` + Telephone string `json:"telephone,omitempty"` + Country string `json:"country,omitempty"` + Zipcode string `json:"zipcode,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + APIKey string `json:"api_key,omitempty"` + TwoFA bool `json:"two_factor_authentication_enabled,omitempty"` + Betas []string `json:"betas,omitempty"` + Accounts []Account `json:"organizations,omitempty"` +} + +// UserResponse wraps a response containing User accounts. +type UserResponse struct { + Response + Result User `json:"result"` +} + +// userBillingProfileResponse wraps a response containing Billing Profile information. +type userBillingProfileResponse struct { + Response + Result UserBillingProfile +} + +// UserBillingProfile contains Billing Profile information. +type UserBillingProfile struct { + ID string `json:"id,omitempty"` + FirstName string `json:"first_name,omitempty"` + LastName string `json:"last_name,omitempty"` + Address string `json:"address,omitempty"` + Address2 string `json:"address2,omitempty"` + Company string `json:"company,omitempty"` + City string `json:"city,omitempty"` + State string `json:"state,omitempty"` + ZipCode string `json:"zipcode,omitempty"` + Country string `json:"country,omitempty"` + Telephone string `json:"telephone,omitempty"` + CardNumber string `json:"card_number,omitempty"` + CardExpiryYear int `json:"card_expiry_year,omitempty"` + CardExpiryMonth int `json:"card_expiry_month,omitempty"` + VAT string `json:"vat,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + EditedOn *time.Time `json:"edited_on,omitempty"` +} + +type UserBillingHistoryResponse struct { + Response + Result []UserBillingHistory `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +type UserBillingHistory struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Action string `json:"action,omitempty"` + Description string `json:"description,omitempty"` + OccurredAt *time.Time `json:"occurred_at,omitempty"` + Amount float32 `json:"amount,omitempty"` + Currency string `json:"currency,omitempty"` + Zone userBillingHistoryZone `json:"zone"` +} + +type userBillingHistoryZone struct { + Name string `json:"name,omitempty"` +} + +type UserBillingOptions struct { + PaginationOptions + Order string `url:"order,omitempty"` + Type string `url:"type,omitempty"` + OccurredAt *time.Time `url:"occurred_at,omitempty"` + Action string `url:"action,omitempty"` +} + +// UserDetails provides information about the logged-in user. +// +// API reference: https://api.cloudflare.com/#user-user-details +func (api *API) UserDetails(ctx context.Context) (User, error) { + var r UserResponse + res, err := api.makeRequestContext(ctx, http.MethodGet, "/user", nil) + if err != nil { + return User{}, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return User{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateUser updates the properties of the given user. 
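+//
+// Illustrative usage only; since the request is a PATCH, only the fields set on
+// the passed User are intended to change (the names below are placeholders):
+//
+//	user, err := api.UpdateUser(ctx, &cloudflare.User{
+//		FirstName: "Jane",
+//		LastName:  "Doe",
+//	})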
+// +// API reference: https://api.cloudflare.com/#user-update-user +func (api *API) UpdateUser(ctx context.Context, user *User) (User, error) { + var r UserResponse + res, err := api.makeRequestContext(ctx, http.MethodPatch, "/user", user) + if err != nil { + return User{}, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return User{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UserBillingProfile returns the billing profile of the user. +// +// API reference: https://api.cloudflare.com/#user-billing-profile +func (api *API) UserBillingProfile(ctx context.Context) (UserBillingProfile, error) { + var r userBillingProfileResponse + res, err := api.makeRequestContext(ctx, http.MethodGet, "/user/billing/profile", nil) + if err != nil { + return UserBillingProfile{}, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return UserBillingProfile{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UserBillingHistory return the billing history of the user +// +// API reference: https://api.cloudflare.com/#user-billing-history-billing-history-details +func (api *API) UserBillingHistory(ctx context.Context, pageOpts UserBillingOptions) ([]UserBillingHistory, error) { + uri := buildURI("/user/billing/history", pageOpts) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []UserBillingHistory{}, err + } + var r UserBillingHistoryResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []UserBillingHistory{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/user_agent.go b/vendor/github.com/cloudflare/cloudflare-go/user_agent.go new file mode 100644 index 0000000..10449bb --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/user_agent.go @@ -0,0 +1,151 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/goccy/go-json" +) + +// UserAgentRule represents a User-Agent Block. These rules can be used to +// challenge, block or whitelist specific User-Agents for a given zone. +type UserAgentRule struct { + ID string `json:"id"` + Description string `json:"description"` + Mode string `json:"mode"` + Configuration UserAgentRuleConfig `json:"configuration"` + Paused bool `json:"paused"` +} + +// UserAgentRuleConfig represents a Zone Lockdown config, which comprises +// a Target ("ip" or "ip_range") and a Value (an IP address or IP+mask, +// respectively.) +type UserAgentRuleConfig ZoneLockdownConfig + +// UserAgentRuleResponse represents a response from the Zone Lockdown endpoint. +type UserAgentRuleResponse struct { + Result UserAgentRule `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// UserAgentRuleListResponse represents a response from the List Zone Lockdown endpoint. +type UserAgentRuleListResponse struct { + Result []UserAgentRule `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// CreateUserAgentRule creates a User-Agent Block rule for the given zone ID. 
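+//
+// Illustrative usage only; zoneID and the configuration values below are
+// placeholders (the Target/Value pair assumes the User-Agent match target used
+// by the Cloudflare API):
+//
+//	rule, err := api.CreateUserAgentRule(ctx, zoneID, cloudflare.UserAgentRule{
+//		Mode:          "block",
+//		Configuration: cloudflare.UserAgentRuleConfig{Target: "ua", Value: "BadBot/1.0"},
+//	})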
+// +// API reference: https://api.cloudflare.com/#user-agent-blocking-rules-create-a-useragent-rule +func (api *API) CreateUserAgentRule(ctx context.Context, zoneID string, ld UserAgentRule) (*UserAgentRuleResponse, error) { + switch ld.Mode { + case "block", "challenge", "js_challenge", "managed_challenge": + break + default: + return nil, errors.New(`the User-Agent Block rule mode must be one of "block", "challenge", "js_challenge", "managed_challenge"`) + } + + uri := fmt.Sprintf("/zones/%s/firewall/ua_rules", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, ld) + if err != nil { + return nil, err + } + + response := &UserAgentRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// UpdateUserAgentRule updates a User-Agent Block rule (based on the ID) for the given zone ID. +// +// API reference: https://api.cloudflare.com/#user-agent-blocking-rules-update-useragent-rule +func (api *API) UpdateUserAgentRule(ctx context.Context, zoneID string, id string, ld UserAgentRule) (*UserAgentRuleResponse, error) { + uri := fmt.Sprintf("/zones/%s/firewall/ua_rules/%s", zoneID, id) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, ld) + if err != nil { + return nil, err + } + + response := &UserAgentRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// DeleteUserAgentRule deletes a User-Agent Block rule (based on the ID) for the given zone ID. +// +// API reference: https://api.cloudflare.com/#user-agent-blocking-rules-delete-useragent-rule +func (api *API) DeleteUserAgentRule(ctx context.Context, zoneID string, id string) (*UserAgentRuleResponse, error) { + uri := fmt.Sprintf("/zones/%s/firewall/ua_rules/%s", zoneID, id) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + + response := &UserAgentRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// UserAgentRule retrieves a User-Agent Block rule (based on the ID) for the given zone ID. +// +// API reference: https://api.cloudflare.com/#user-agent-blocking-rules-useragent-rule-details +func (api *API) UserAgentRule(ctx context.Context, zoneID string, id string) (*UserAgentRuleResponse, error) { + uri := fmt.Sprintf("/zones/%s/firewall/ua_rules/%s", zoneID, id) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + response := &UserAgentRuleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// ListUserAgentRules retrieves a list of User-Agent Block rules for a given zone ID by page number. 
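+//
+// Illustrative usage only; a page value below 1 is treated as page 1:
+//
+//	resp, err := api.ListUserAgentRules(ctx, zoneID, 1)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Result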
+// +// API reference: https://api.cloudflare.com/#user-agent-blocking-rules-list-useragent-rules +func (api *API) ListUserAgentRules(ctx context.Context, zoneID string, page int) (*UserAgentRuleListResponse, error) { + v := url.Values{} + if page <= 0 { + page = 1 + } + + v.Set("page", strconv.Itoa(page)) + v.Set("per_page", strconv.Itoa(100)) + + uri := fmt.Sprintf("/zones/%s/firewall/ua_rules?%s", zoneID, v.Encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + response := &UserAgentRuleListResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/utils.go b/vendor/github.com/cloudflare/cloudflare-go/utils.go new file mode 100644 index 0000000..bc2dc56 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/utils.go @@ -0,0 +1,28 @@ +package cloudflare + +import ( + "fmt" + "net/url" + "os" + "path/filepath" + + "github.com/google/go-querystring/query" +) + +// buildURI assembles the base path and queries. +func buildURI(path string, options interface{}) string { + v, _ := query.Values(options) + return (&url.URL{Path: path, RawQuery: v.Encode()}).String() +} + +// loadFixture takes a series of path components and returns the JSON fixture at +// that location associated. +func loadFixture(parts ...string) string { + paths := []string{"testdata", "fixtures"} + paths = append(paths, parts...) + b, err := os.ReadFile(filepath.Join(paths...) + ".json") + if err != nil { + fmt.Print(err) + } + return string(b) +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/waf.go b/vendor/github.com/cloudflare/cloudflare-go/waf.go new file mode 100644 index 0000000..c7a458a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/waf.go @@ -0,0 +1,352 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/goccy/go-json" +) + +// WAFPackage represents a WAF package configuration. +type WAFPackage struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + ZoneID string `json:"zone_id"` + DetectionMode string `json:"detection_mode"` + Sensitivity string `json:"sensitivity"` + ActionMode string `json:"action_mode"` +} + +// WAFPackagesResponse represents the response from the WAF packages endpoint. +type WAFPackagesResponse struct { + Response + Result []WAFPackage `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFPackageResponse represents the response from the WAF package endpoint. +type WAFPackageResponse struct { + Response + Result WAFPackage `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFPackageOptions represents options to edit a WAF package. +type WAFPackageOptions struct { + Sensitivity string `json:"sensitivity,omitempty"` + ActionMode string `json:"action_mode,omitempty"` +} + +// WAFGroup represents a WAF rule group. +type WAFGroup struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + RulesCount int `json:"rules_count"` + ModifiedRulesCount int `json:"modified_rules_count"` + PackageID string `json:"package_id"` + Mode string `json:"mode"` + AllowedModes []string `json:"allowed_modes"` +} + +// WAFGroupsResponse represents the response from the WAF groups endpoint. 
+type WAFGroupsResponse struct { + Response + Result []WAFGroup `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFGroupResponse represents the response from the WAF group endpoint. +type WAFGroupResponse struct { + Response + Result WAFGroup `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFRule represents a WAF rule. +type WAFRule struct { + ID string `json:"id"` + Description string `json:"description"` + Priority string `json:"priority"` + PackageID string `json:"package_id"` + Group struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"group"` + Mode string `json:"mode"` + DefaultMode string `json:"default_mode"` + AllowedModes []string `json:"allowed_modes"` +} + +// WAFRulesResponse represents the response from the WAF rules endpoint. +type WAFRulesResponse struct { + Response + Result []WAFRule `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFRuleResponse represents the response from the WAF rule endpoint. +type WAFRuleResponse struct { + Response + Result WAFRule `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFRuleOptions is a subset of WAFRule, for editable options. +type WAFRuleOptions struct { + Mode string `json:"mode"` +} + +// ListWAFPackages returns a slice of the WAF packages for the given zone. +// +// API Reference: https://api.cloudflare.com/#waf-rule-packages-list-firewall-packages +func (api *API) ListWAFPackages(ctx context.Context, zoneID string) ([]WAFPackage, error) { + // Construct a query string + v := url.Values{} + // Request as many WAF packages as possible per page - API max is 100 + v.Set("per_page", "100") + + var packages []WAFPackage + var res []byte + var err error + page := 1 + + // Loop over makeRequest until what we've fetched all records + for { + v.Set("page", strconv.Itoa(page)) + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages?%s", zoneID, v.Encode()) + res, err = api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WAFPackage{}, err + } + + var p WAFPackagesResponse + err = json.Unmarshal(res, &p) + if err != nil { + return []WAFPackage{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !p.Success { + // TODO: Provide an actual error message instead of always returning nil + return []WAFPackage{}, err + } + + packages = append(packages, p.Result...) + if p.ResultInfo.Page >= p.ResultInfo.TotalPages { + break + } + + // Loop around and fetch the next page + page++ + } + + return packages, nil +} + +// WAFPackage returns a WAF package for the given zone. +// +// API Reference: https://api.cloudflare.com/#waf-rule-packages-firewall-package-details +func (api *API) WAFPackage(ctx context.Context, zoneID, packageID string) (WAFPackage, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s", zoneID, packageID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WAFPackage{}, err + } + + var r WAFPackageResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFPackage{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateWAFPackage lets you update the a WAF Package. 
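+//
+// Illustrative usage only; the sensitivity and action mode values below are
+// placeholders for packages that support them:
+//
+//	pkg, err := api.UpdateWAFPackage(ctx, zoneID, packageID, cloudflare.WAFPackageOptions{
+//		Sensitivity: "high",
+//		ActionMode:  "challenge",
+//	})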
+// +// API Reference: https://api.cloudflare.com/#waf-rule-packages-edit-firewall-package +func (api *API) UpdateWAFPackage(ctx context.Context, zoneID, packageID string, opts WAFPackageOptions) (WAFPackage, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s", zoneID, packageID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, opts) + if err != nil { + return WAFPackage{}, err + } + + var r WAFPackageResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFPackage{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListWAFGroups returns a slice of the WAF groups for the given WAF package. +// +// API Reference: https://api.cloudflare.com/#waf-rule-groups-list-rule-groups +func (api *API) ListWAFGroups(ctx context.Context, zoneID, packageID string) ([]WAFGroup, error) { + // Construct a query string + v := url.Values{} + // Request as many WAF groups as possible per page - API max is 100 + v.Set("per_page", "100") + + var groups []WAFGroup + var res []byte + var err error + page := 1 + + // Loop over makeRequest until what we've fetched all records + for { + v.Set("page", strconv.Itoa(page)) + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s/groups?%s", zoneID, packageID, v.Encode()) + res, err = api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WAFGroup{}, err + } + + var r WAFGroupsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []WAFGroup{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !r.Success { + // TODO: Provide an actual error message instead of always returning nil + return []WAFGroup{}, err + } + + groups = append(groups, r.Result...) + if r.ResultInfo.Page >= r.ResultInfo.TotalPages { + break + } + + // Loop around and fetch the next page + page++ + } + return groups, nil +} + +// WAFGroup returns a WAF rule group from the given WAF package. +// +// API Reference: https://api.cloudflare.com/#waf-rule-groups-rule-group-details +func (api *API) WAFGroup(ctx context.Context, zoneID, packageID, groupID string) (WAFGroup, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s/groups/%s", zoneID, packageID, groupID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WAFGroup{}, err + } + + var r WAFGroupResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFGroup{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateWAFGroup lets you update the mode of a WAF Group. +// +// API Reference: https://api.cloudflare.com/#waf-rule-groups-edit-rule-group +func (api *API) UpdateWAFGroup(ctx context.Context, zoneID, packageID, groupID, mode string) (WAFGroup, error) { + opts := WAFRuleOptions{Mode: mode} + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s/groups/%s", zoneID, packageID, groupID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, opts) + if err != nil { + return WAFGroup{}, err + } + + var r WAFGroupResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFGroup{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ListWAFRules returns a slice of the WAF rules for the given WAF package. 
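+//
+// Illustrative usage only; pagination is handled internally, so the full rule
+// set for the package is returned in one call:
+//
+//	rules, err := api.ListWAFRules(ctx, zoneID, packageID)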
+// +// API Reference: https://api.cloudflare.com/#waf-rules-list-rules +func (api *API) ListWAFRules(ctx context.Context, zoneID, packageID string) ([]WAFRule, error) { + // Construct a query string + v := url.Values{} + // Request as many WAF rules as possible per page - API max is 100 + v.Set("per_page", "100") + + var rules []WAFRule + var res []byte + var err error + page := 1 + + // Loop over makeRequest until what we've fetched all records + for { + v.Set("page", strconv.Itoa(page)) + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s/rules?%s", zoneID, packageID, v.Encode()) + res, err = api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WAFRule{}, err + } + + var r WAFRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []WAFRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !r.Success { + // TODO: Provide an actual error message instead of always returning nil + return []WAFRule{}, err + } + + rules = append(rules, r.Result...) + if r.ResultInfo.Page >= r.ResultInfo.TotalPages { + break + } + + // Loop around and fetch the next page + page++ + } + + return rules, nil +} + +// WAFRule returns a WAF rule from the given WAF package. +// +// API Reference: https://api.cloudflare.com/#waf-rules-rule-details +func (api *API) WAFRule(ctx context.Context, zoneID, packageID, ruleID string) (WAFRule, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s/rules/%s", zoneID, packageID, ruleID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WAFRule{}, err + } + + var r WAFRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateWAFRule lets you update the mode of a WAF Rule. +// +// API Reference: https://api.cloudflare.com/#waf-rules-edit-rule +func (api *API) UpdateWAFRule(ctx context.Context, zoneID, packageID, ruleID, mode string) (WAFRule, error) { + opts := WAFRuleOptions{Mode: mode} + uri := fmt.Sprintf("/zones/%s/firewall/waf/packages/%s/rules/%s", zoneID, packageID, ruleID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, opts) + if err != nil { + return WAFRule{}, err + } + + var r WAFRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFRule{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/waf_overrides.go b/vendor/github.com/cloudflare/cloudflare-go/waf_overrides.go new file mode 100644 index 0000000..665cb61 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/waf_overrides.go @@ -0,0 +1,138 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// WAFOverridesResponse represents the response form the WAF overrides endpoint. +type WAFOverridesResponse struct { + Response + Result []WAFOverride `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFOverrideResponse represents the response form the WAF override endpoint. +type WAFOverrideResponse struct { + Response + Result WAFOverride `json:"result"` + ResultInfo ResultInfo `json:"result_info"` +} + +// WAFOverride represents a WAF override. 
+type WAFOverride struct { + ID string `json:"id,omitempty"` + Description string `json:"description"` + URLs []string `json:"urls"` + Priority int `json:"priority"` + Groups map[string]string `json:"groups"` + RewriteAction map[string]string `json:"rewrite_action"` + Rules map[string]string `json:"rules"` + Paused bool `json:"paused"` +} + +// ListWAFOverrides returns a slice of the WAF overrides. +// +// API Reference: https://api.cloudflare.com/#waf-overrides-list-uri-controlled-waf-configurations +func (api *API) ListWAFOverrides(ctx context.Context, zoneID string) ([]WAFOverride, error) { + var overrides []WAFOverride + var res []byte + var err error + + uri := fmt.Sprintf("/zones/%s/firewall/waf/overrides", zoneID) + res, err = api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WAFOverride{}, err + } + + var r WAFOverridesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []WAFOverride{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + if !r.Success { + // TODO: Provide an actual error message instead of always returning nil + return []WAFOverride{}, err + } + + for ri := range r.Result { + overrides = append(overrides, r.Result[ri]) + } + return overrides, nil +} + +// WAFOverride returns a WAF override from the given override ID. +// +// API Reference: https://api.cloudflare.com/#waf-overrides-uri-controlled-waf-configuration-details +func (api *API) WAFOverride(ctx context.Context, zoneID, overrideID string) (WAFOverride, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/overrides/%s", zoneID, overrideID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WAFOverride{}, err + } + + var r WAFOverrideResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFOverride{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// CreateWAFOverride creates a new WAF override. +// +// API reference: https://api.cloudflare.com/#waf-overrides-create-a-uri-controlled-waf-configuration +func (api *API) CreateWAFOverride(ctx context.Context, zoneID string, override WAFOverride) (WAFOverride, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/overrides", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, override) + if err != nil { + return WAFOverride{}, err + } + var r WAFOverrideResponse + if err := json.Unmarshal(res, &r); err != nil { + return WAFOverride{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateWAFOverride updates an existing WAF override. +// +// API reference: https://api.cloudflare.com/#waf-overrides-update-uri-controlled-waf-configuration +func (api *API) UpdateWAFOverride(ctx context.Context, zoneID, overrideID string, override WAFOverride) (WAFOverride, error) { + uri := fmt.Sprintf("/zones/%s/firewall/waf/overrides/%s", zoneID, overrideID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, override) + if err != nil { + return WAFOverride{}, err + } + + var r WAFOverrideResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WAFOverride{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteWAFOverride deletes a WAF override for a zone. 
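+//
+// Illustrative usage only; zoneID and overrideID are caller-side placeholders:
+//
+//	if err := api.DeleteWAFOverride(ctx, zoneID, overrideID); err != nil {
+//		// handle error
+//	}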
+// +// API reference: https://api.cloudflare.com/#waf-overrides-delete-lockdown-rule +func (api *API) DeleteWAFOverride(ctx context.Context, zoneID, overrideID string) error { + uri := fmt.Sprintf("/zones/%s/firewall/waf/overrides/%s", zoneID, overrideID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r WAFOverrideResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/waiting_room.go b/vendor/github.com/cloudflare/cloudflare-go/waiting_room.go new file mode 100644 index 0000000..0cb787b --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/waiting_room.go @@ -0,0 +1,630 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingWaitingRoomID = errors.New("missing required waiting room ID") + ErrMissingWaitingRoomRuleID = errors.New("missing required waiting room rule ID") +) + +// WaitingRoom describes a WaitingRoom object. +type WaitingRoom struct { + CreatedOn time.Time `json:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + Path string `json:"path"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + QueueingMethod string `json:"queueing_method,omitempty"` + CustomPageHTML string `json:"custom_page_html,omitempty"` + DefaultTemplateLanguage string `json:"default_template_language,omitempty"` + Host string `json:"host"` + ID string `json:"id,omitempty"` + NewUsersPerMinute int `json:"new_users_per_minute"` + TotalActiveUsers int `json:"total_active_users"` + SessionDuration int `json:"session_duration"` + QueueAll bool `json:"queue_all"` + DisableSessionRenewal bool `json:"disable_session_renewal"` + Suspended bool `json:"suspended"` + JsonResponseEnabled bool `json:"json_response_enabled"` + NextEventPrequeueStartTime *time.Time `json:"next_event_prequeue_start_time,omitempty"` + NextEventStartTime *time.Time `json:"next_event_start_time,omitempty"` + CookieSuffix string `json:"cookie_suffix"` + AdditionalRoutes []*WaitingRoomRoute `json:"additional_routes,omitempty"` + QueueingStatusCode int `json:"queueing_status_code"` + EnabledOriginCommands []string `json:"enabled_origin_commands,omitempty"` +} + +// WaitingRoomStatus describes the status of a waiting room. +type WaitingRoomStatus struct { + Status string `json:"status"` + EventID string `json:"event_id"` + EstimatedQueuedUsers int `json:"estimated_queued_users"` + EstimatedTotalActiveUsers int `json:"estimated_total_active_users"` + MaxEstimatedTimeMinutes int `json:"max_estimated_time_minutes"` +} + +// WaitingRoomEvent describes a WaitingRoomEvent object. 
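+//
+// A minimal sketch of scheduling an event with CreateWaitingRoomEvent below,
+// assuming api is an initialized *API, ctx is a context.Context, and zoneID
+// and waitingRoomID are placeholder identifiers:
+//
+//	event := WaitingRoomEvent{
+//		Name:           "product_launch",
+//		EventStartTime: time.Now().Add(24 * time.Hour),
+//		EventEndTime:   time.Now().Add(26 * time.Hour),
+//	}
+//	created, err := api.CreateWaitingRoomEvent(ctx, zoneID, waitingRoomID, event)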
+type WaitingRoomEvent struct { + EventEndTime time.Time `json:"event_end_time"` + CreatedOn time.Time `json:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + PrequeueStartTime *time.Time `json:"prequeue_start_time,omitempty"` + EventStartTime time.Time `json:"event_start_time"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + QueueingMethod string `json:"queueing_method,omitempty"` + ID string `json:"id,omitempty"` + CustomPageHTML string `json:"custom_page_html,omitempty"` + NewUsersPerMinute int `json:"new_users_per_minute,omitempty"` + TotalActiveUsers int `json:"total_active_users,omitempty"` + SessionDuration int `json:"session_duration,omitempty"` + DisableSessionRenewal *bool `json:"disable_session_renewal,omitempty"` + Suspended bool `json:"suspended"` + ShuffleAtEventStart bool `json:"shuffle_at_event_start"` +} + +type WaitingRoomRule struct { + ID string `json:"id,omitempty"` + Version string `json:"version,omitempty"` + Action string `json:"action"` + Expression string `json:"expression"` + Description string `json:"description"` + LastUpdated *time.Time `json:"last_updated,omitempty"` + Enabled *bool `json:"enabled"` +} + +// WaitingRoomSettings describes zone-level waiting room settings. +type WaitingRoomSettings struct { + // Whether to allow verified search engine crawlers to bypass all waiting rooms on this zone + SearchEngineCrawlerBypass bool `json:"search_engine_crawler_bypass"` +} + +// WaitingRoomPagePreviewURL describes a WaitingRoomPagePreviewURL object. +type WaitingRoomPagePreviewURL struct { + PreviewURL string `json:"preview_url"` +} + +// WaitingRoomPagePreviewCustomHTML describes a WaitingRoomPagePreviewCustomHTML object. +type WaitingRoomPagePreviewCustomHTML struct { + CustomHTML string `json:"custom_html"` +} + +// WaitingRoomRoute describes a WaitingRoomRoute object. +type WaitingRoomRoute struct { + Host string `json:"host"` + Path string `json:"path"` +} + +// WaitingRoomDetailResponse is the API response, containing a single WaitingRoom. +type WaitingRoomDetailResponse struct { + Response + Result WaitingRoom `json:"result"` +} + +// WaitingRoomsResponse is the API response, containing an array of WaitingRooms. +type WaitingRoomsResponse struct { + Response + Result []WaitingRoom `json:"result"` +} + +// WaitingRoomSettingsResponse is the API response, containing zone-level Waiting Room settings. +type WaitingRoomSettingsResponse struct { + Response + Result WaitingRoomSettings `json:"result"` +} + +// WaitingRoomStatusResponse is the API response, containing the status of a waiting room. +type WaitingRoomStatusResponse struct { + Response + Result WaitingRoomStatus `json:"result"` +} + +// WaitingRoomPagePreviewResponse is the API response, containing the URL to a custom waiting room preview. +type WaitingRoomPagePreviewResponse struct { + Response + Result WaitingRoomPagePreviewURL `json:"result"` +} + +// WaitingRoomEventDetailResponse is the API response, containing a single WaitingRoomEvent. +type WaitingRoomEventDetailResponse struct { + Response + Result WaitingRoomEvent `json:"result"` +} + +// WaitingRoomEventsResponse is the API response, containing an array of WaitingRoomEvents. +type WaitingRoomEventsResponse struct { + Response + Result []WaitingRoomEvent `json:"result"` +} + +// WaitingRoomRulesResponse is the API response, containing an array of WaitingRoomRule. 
+type WaitingRoomRulesResponse struct { + Response + Result []WaitingRoomRule `json:"result"` +} + +// CreateWaitingRoom creates a new Waiting Room for a zone. +// +// API reference: https://api.cloudflare.com/#waiting-room-create-waiting-room +func (api *API) CreateWaitingRoom(ctx context.Context, zoneID string, waitingRoom WaitingRoom) (*WaitingRoom, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, waitingRoom) + if err != nil { + return nil, err + } + var r WaitingRoomDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +// ListWaitingRooms returns all Waiting Room for a zone. +// +// API reference: https://api.cloudflare.com/#waiting-room-list-waiting-rooms +func (api *API) ListWaitingRooms(ctx context.Context, zoneID string) ([]WaitingRoom, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WaitingRoom{}, err + } + var r WaitingRoomsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []WaitingRoom{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// WaitingRoom fetches detail about one Waiting room for a zone. +// +// API reference: https://api.cloudflare.com/#waiting-room-waiting-room-details +func (api *API) WaitingRoom(ctx context.Context, zoneID, waitingRoomID string) (WaitingRoom, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s", zoneID, waitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WaitingRoom{}, err + } + var r WaitingRoomDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoom{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ChangeWaitingRoom lets you change individual settings for a Waiting room. This is +// in contrast to UpdateWaitingRoom which replaces the entire Waiting room. +// +// API reference: https://api.cloudflare.com/#waiting-room-update-waiting-room +func (api *API) ChangeWaitingRoom(ctx context.Context, zoneID, waitingRoomID string, waitingRoom WaitingRoom) (WaitingRoom, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s", zoneID, waitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, waitingRoom) + if err != nil { + return WaitingRoom{}, err + } + var r WaitingRoomDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoom{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateWaitingRoom lets you replace a Waiting Room. This is in contrast to +// ChangeWaitingRoom which lets you change individual settings. +// +// API reference: https://api.cloudflare.com/#waiting-room-update-waiting-room +func (api *API) UpdateWaitingRoom(ctx context.Context, zoneID string, waitingRoom WaitingRoom) (WaitingRoom, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s", zoneID, waitingRoom.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, waitingRoom) + if err != nil { + return WaitingRoom{}, err + } + var r WaitingRoomDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoom{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteWaitingRoom deletes a Waiting Room for a zone. 
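+//
+// A minimal sketch, assuming api is an initialized *API, ctx is a
+// context.Context, and zoneID and waitingRoomID are placeholder identifiers:
+//
+//	if err := api.DeleteWaitingRoom(ctx, zoneID, waitingRoomID); err != nil {
+//		// handle the error
+//	}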
+// +// API reference: https://api.cloudflare.com/#waiting-room-delete-waiting-room +func (api *API) DeleteWaitingRoom(ctx context.Context, zoneID, waitingRoomID string) error { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s", zoneID, waitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r WaitingRoomDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +// WaitingRoomStatus returns the status of one Waiting Room for a zone. +// +// API reference: https://api.cloudflare.com/#waiting-room-get-waiting-room-status +func (api *API) WaitingRoomStatus(ctx context.Context, zoneID, waitingRoomID string) (WaitingRoomStatus, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/status", zoneID, waitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WaitingRoomStatus{}, err + } + var r WaitingRoomStatusResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomStatus{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// WaitingRoomPagePreview uploads a custom waiting room page for preview and +// returns a preview URL. +// +// API reference: https://api.cloudflare.com/#waiting-room-create-a-custom-waiting-room-page-preview +func (api *API) WaitingRoomPagePreview(ctx context.Context, zoneID, customHTML string) (WaitingRoomPagePreviewURL, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/preview", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, WaitingRoomPagePreviewCustomHTML{CustomHTML: customHTML}) + + if err != nil { + return WaitingRoomPagePreviewURL{}, err + } + var r WaitingRoomPagePreviewResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomPagePreviewURL{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// CreateWaitingRoomEvent creates a new event for a Waiting Room. +// +// API reference: https://api.cloudflare.com/#waiting-room-create-event +func (api *API) CreateWaitingRoomEvent(ctx context.Context, zoneID string, waitingRoomID string, waitingRoomEvent WaitingRoomEvent) (*WaitingRoomEvent, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events", zoneID, waitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, waitingRoomEvent) + if err != nil { + return nil, err + } + var r WaitingRoomEventDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +// ListWaitingRoomEvents returns all Waiting Room Events for a zone. +// +// API reference: https://api.cloudflare.com/#waiting-room-list-events +func (api *API) ListWaitingRoomEvents(ctx context.Context, zoneID string, waitingRoomID string) ([]WaitingRoomEvent, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events", zoneID, waitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WaitingRoomEvent{}, err + } + var r WaitingRoomEventsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []WaitingRoomEvent{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// WaitingRoomEvent fetches detail about one Waiting Room Event for a zone. 
+// +// API reference: https://api.cloudflare.com/#waiting-room-event-details +func (api *API) WaitingRoomEvent(ctx context.Context, zoneID string, waitingRoomID string, eventID string) (WaitingRoomEvent, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events/%s", zoneID, waitingRoomID, eventID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WaitingRoomEvent{}, err + } + var r WaitingRoomEventDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomEvent{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// WaitingRoomEventPreview returns an event's configuration as if it was active. +// Inherited fields from the waiting room will be displayed with their current values. +// +// API reference: https://api.cloudflare.com/#waiting-room-preview-active-event-details +func (api *API) WaitingRoomEventPreview(ctx context.Context, zoneID string, waitingRoomID string, eventID string) (WaitingRoomEvent, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events/%s/details", zoneID, waitingRoomID, eventID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WaitingRoomEvent{}, err + } + var r WaitingRoomEventDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomEvent{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ChangeWaitingRoomEvent lets you change individual settings for a Waiting Room Event. This is +// in contrast to UpdateWaitingRoomEvent which replaces the entire Waiting Room Event. +// +// API reference: https://api.cloudflare.com/#waiting-room-patch-event +func (api *API) ChangeWaitingRoomEvent(ctx context.Context, zoneID, waitingRoomID string, waitingRoomEvent WaitingRoomEvent) (WaitingRoomEvent, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events/%s", zoneID, waitingRoomID, waitingRoomEvent.ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, waitingRoomEvent) + if err != nil { + return WaitingRoomEvent{}, err + } + var r WaitingRoomEventDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomEvent{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateWaitingRoomEvent lets you replace a Waiting Room Event. This is in contrast to +// ChangeWaitingRoomEvent which lets you change individual settings. +// +// API reference: https://api.cloudflare.com/#waiting-room-update-event +func (api *API) UpdateWaitingRoomEvent(ctx context.Context, zoneID string, waitingRoomID string, waitingRoomEvent WaitingRoomEvent) (WaitingRoomEvent, error) { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events/%s", zoneID, waitingRoomID, waitingRoomEvent.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, waitingRoomEvent) + if err != nil { + return WaitingRoomEvent{}, err + } + var r WaitingRoomEventDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomEvent{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// DeleteWaitingRoomEvent deletes an event for a Waiting Room. 
+// +// API reference: https://api.cloudflare.com/#waiting-room-delete-event +func (api *API) DeleteWaitingRoomEvent(ctx context.Context, zoneID string, waitingRoomID string, eventID string) error { + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/events/%s", zoneID, waitingRoomID, eventID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + var r WaitingRoomEventDetailResponse + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return nil +} + +type ListWaitingRoomRuleParams struct { + WaitingRoomID string +} + +type CreateWaitingRoomRuleParams struct { + WaitingRoomID string + Rule WaitingRoomRule +} + +type ReplaceWaitingRoomRuleParams struct { + WaitingRoomID string + Rules []WaitingRoomRule +} + +type UpdateWaitingRoomRuleParams struct { + WaitingRoomID string + Rule WaitingRoomRule +} + +type DeleteWaitingRoomRuleParams struct { + WaitingRoomID string + RuleID string +} + +// ListWaitingRoomRules lists all rules for a Waiting Room. +// +// API reference: https://api.cloudflare.com/#waiting-room-list-waiting-room-rules +func (api *API) ListWaitingRoomRules(ctx context.Context, rc *ResourceContainer, params ListWaitingRoomRuleParams) ([]WaitingRoomRule, error) { + if params.WaitingRoomID == "" { + return nil, ErrMissingWaitingRoomID + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/rules", rc.Identifier, params.WaitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var r WaitingRoomRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// CreateWaitingRoomRule creates a new rule for a Waiting Room. +// +// API reference: https://api.cloudflare.com/#waiting-room-create-waiting-room-rule +func (api *API) CreateWaitingRoomRule(ctx context.Context, rc *ResourceContainer, params CreateWaitingRoomRuleParams) ([]WaitingRoomRule, error) { + if params.WaitingRoomID == "" { + return nil, ErrMissingWaitingRoomID + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/rules", rc.Identifier, params.WaitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Rule) + if err != nil { + return nil, err + } + + var r WaitingRoomRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// ReplaceWaitingRoomRules replaces all rules for a Waiting Room. +// +// API reference: https://api.cloudflare.com/#waiting-room-replace-waiting-room-rules +func (api *API) ReplaceWaitingRoomRules(ctx context.Context, rc *ResourceContainer, params ReplaceWaitingRoomRuleParams) ([]WaitingRoomRule, error) { + if params.WaitingRoomID == "" { + return nil, ErrMissingWaitingRoomID + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/rules", rc.Identifier, params.WaitingRoomID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Rules) + if err != nil { + return nil, err + } + + var r WaitingRoomRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateWaitingRoomRule updates a rule for a Waiting Room. 
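+//
+// A minimal sketch, assuming api is an initialized *API, ctx is a
+// context.Context, the zone is addressed through a zone-level
+// *ResourceContainer (for example via ZoneIdentifier), and the IDs, action and
+// expression are placeholders:
+//
+//	rules, err := api.UpdateWaitingRoomRule(ctx, ZoneIdentifier(zoneID), UpdateWaitingRoomRuleParams{
+//		WaitingRoomID: waitingRoomID,
+//		Rule: WaitingRoomRule{
+//			ID:         ruleID,
+//			Action:     "bypass_waiting_room",
+//			Expression: "ip.src in {203.0.113.0/24}",
+//		},
+//	})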
+// +// API reference: https://api.cloudflare.com/#waiting-room-patch-waiting-room-rule +func (api *API) UpdateWaitingRoomRule(ctx context.Context, rc *ResourceContainer, params UpdateWaitingRoomRuleParams) ([]WaitingRoomRule, error) { + if params.WaitingRoomID == "" { + return nil, ErrMissingWaitingRoomID + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/rules/%s", rc.Identifier, params.WaitingRoomID, params.Rule.ID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params.Rule) + if err != nil { + return nil, err + } + + var r WaitingRoomRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DeleteWaitingRoomRule deletes a rule for a Waiting Room. +// +// API reference: https://api.cloudflare.com/#waiting-room-delete-waiting-room-rule +func (api *API) DeleteWaitingRoomRule(ctx context.Context, rc *ResourceContainer, params DeleteWaitingRoomRuleParams) ([]WaitingRoomRule, error) { + if params.WaitingRoomID == "" { + return nil, ErrMissingWaitingRoomID + } + + if params.RuleID == "" { + return nil, ErrMissingWaitingRoomRuleID + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/%s/rules/%s", rc.Identifier, params.WaitingRoomID, params.RuleID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + + var r WaitingRoomRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// GetWaitingRoomSettings fetches the Waiting Room zone-level settings for a zone. +// +// API reference: https://api.cloudflare.com/#waiting-room-get-zone-settings +func (api *API) GetWaitingRoomSettings(ctx context.Context, rc *ResourceContainer) (WaitingRoomSettings, error) { + if rc.Level != ZoneRouteLevel { + return WaitingRoomSettings{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/settings", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WaitingRoomSettings{}, err + } + var r WaitingRoomSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +type PatchWaitingRoomSettingsParams struct { + SearchEngineCrawlerBypass *bool `json:"search_engine_crawler_bypass,omitempty"` +} + +// PatchWaitingRoomSettings lets you change individual zone-level Waiting Room settings. This is +// in contrast to UpdateWaitingRoomSettings which replaces all settings. 
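+//
+// A minimal sketch, assuming api is an initialized *API, ctx is a
+// context.Context, and zoneID is a placeholder zone ID passed through a
+// zone-level *ResourceContainer (for example via ZoneIdentifier):
+//
+//	settings, err := api.PatchWaitingRoomSettings(ctx, ZoneIdentifier(zoneID), PatchWaitingRoomSettingsParams{
+//		SearchEngineCrawlerBypass: BoolPtr(true),
+//	})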
+// +// API reference: https://api.cloudflare.com/#waiting-room-patch-zone-settings +func (api *API) PatchWaitingRoomSettings(ctx context.Context, rc *ResourceContainer, params PatchWaitingRoomSettingsParams) (WaitingRoomSettings, error) { + if rc.Level != ZoneRouteLevel { + return WaitingRoomSettings{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/settings", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return WaitingRoomSettings{}, err + } + var r WaitingRoomSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +type UpdateWaitingRoomSettingsParams struct { + SearchEngineCrawlerBypass *bool `json:"search_engine_crawler_bypass,omitempty"` +} + +// UpdateWaitingRoomSettings lets you replace all zone-level Waiting Room settings. This is in contrast to +// PatchWaitingRoomSettings which lets you change individual settings. +// +// API reference: https://api.cloudflare.com/#waiting-room-update-zone-settings +func (api *API) UpdateWaitingRoomSettings(ctx context.Context, rc *ResourceContainer, params UpdateWaitingRoomSettingsParams) (WaitingRoomSettings, error) { + if rc.Level != ZoneRouteLevel { + return WaitingRoomSettings{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level) + } + + uri := fmt.Sprintf("/zones/%s/waiting_rooms/settings", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return WaitingRoomSettings{}, err + } + var r WaitingRoomSettingsResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WaitingRoomSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/web3.go b/vendor/github.com/cloudflare/cloudflare-go/web3.go new file mode 100644 index 0000000..481a533 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/web3.go @@ -0,0 +1,198 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + // ErrMissingIdentifier is for when identifier is required but missing. + ErrMissingIdentifier = errors.New("identifier required but missing") + // ErrMissingName is for when name is required but missing. + ErrMissingName = errors.New("name required but missing") + // ErrMissingTarget is for when target is required but missing. + ErrMissingTarget = errors.New("target required but missing") +) + +// Web3Hostname represents a web3 hostname. +type Web3Hostname struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Status string `json:"status,omitempty"` + Target string `json:"target,omitempty"` + Dnslink string `json:"dnslink,omitempty"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` +} + +// Web3HostnameListParameters represents the parameters for listing web3 hostnames. +type Web3HostnameListParameters struct { + ZoneID string +} + +// Web3HostnameListResponse represents the API response body for listing web3 hostnames. +type Web3HostnameListResponse struct { + Response + Result []Web3Hostname `json:"result"` +} + +// Web3HostnameCreateParameters represents the parameters for creating a web3 hostname. 
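+//
+// For illustration, a minimal sketch of creating a hostname with
+// CreateWeb3Hostname below; api and ctx are assumed to be an initialized *API
+// and a context.Context, and the zone ID, name and target are placeholders:
+//
+//	hostname, err := api.CreateWeb3Hostname(ctx, Web3HostnameCreateParameters{
+//		ZoneID: zoneID,
+//		Name:   "gateway.example.com",
+//		Target: "ipfs",
+//	})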
+type Web3HostnameCreateParameters struct { + ZoneID string + Name string `json:"name,omitempty"` + Target string `json:"target,omitempty"` + Description string `json:"description,omitempty"` + DNSLink string `json:"dnslink,omitempty"` +} + +// Web3HostnameResponse represents an API response body for a web3 hostname. +type Web3HostnameResponse struct { + Response + Result Web3Hostname `json:"result,omitempty"` +} + +// Web3HostnameDetailsParameters represents the parameters for getting a single web3 hostname. +type Web3HostnameDetailsParameters struct { + ZoneID string + Identifier string +} + +// Web3HostnameUpdateParameters represents the parameters for editing a web3 hostname. +type Web3HostnameUpdateParameters struct { + ZoneID string + Identifier string + Description string `json:"description,omitempty"` + DNSLink string `json:"dnslink,omitempty"` +} + +// Web3HostnameDeleteResult represents the result of deleting a web3 hostname. +type Web3HostnameDeleteResult struct { + ID string `json:"id,omitempty"` +} + +// Web3HostnameDeleteResponse represents the API response body for deleting a web3 hostname. +type Web3HostnameDeleteResponse struct { + Response + Result Web3HostnameDeleteResult `json:"result,omitempty"` +} + +// ListWeb3Hostnames lists all web3 hostnames. +// +// API Reference: https://api.cloudflare.com/#web3-hostname-list-web3-hostnames +func (api *API) ListWeb3Hostnames(ctx context.Context, params Web3HostnameListParameters) ([]Web3Hostname, error) { + if params.ZoneID == "" { + return []Web3Hostname{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/web3/hostnames", params.ZoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []Web3Hostname{}, err + } + var web3ListResponse Web3HostnameListResponse + if err := json.Unmarshal(res, &web3ListResponse); err != nil { + return []Web3Hostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return web3ListResponse.Result, nil +} + +// CreateWeb3Hostname creates a web3 hostname. +// +// API Reference: https://api.cloudflare.com/#web3-hostname-create-web3-hostname +func (api *API) CreateWeb3Hostname(ctx context.Context, params Web3HostnameCreateParameters) (Web3Hostname, error) { + if params.ZoneID == "" { + return Web3Hostname{}, ErrMissingZoneID + } + if params.Name == "" { + return Web3Hostname{}, ErrMissingName + } + if params.Target == "" { + return Web3Hostname{}, ErrMissingTarget + } + + uri := fmt.Sprintf("/zones/%s/web3/hostnames", params.ZoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return Web3Hostname{}, err + } + var web3Response Web3HostnameResponse + if err := json.Unmarshal(res, &web3Response); err != nil { + return Web3Hostname{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return web3Response.Result, nil +} + +// GetWeb3Hostname gets a single web3 hostname by identifier. 
+// +// API Reference: https://api.cloudflare.com/#web3-hostname-web3-hostname-details +func (api *API) GetWeb3Hostname(ctx context.Context, params Web3HostnameDetailsParameters) (Web3Hostname, error) { + if params.ZoneID == "" { + return Web3Hostname{}, ErrMissingZoneID + } + if params.Identifier == "" { + return Web3Hostname{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/zones/%s/web3/hostnames/%s", params.ZoneID, params.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Web3Hostname{}, err + } + var web3Response Web3HostnameResponse + if err := json.Unmarshal(res, &web3Response); err != nil { + return Web3Hostname{}, err + } + return web3Response.Result, nil +} + +// UpdateWeb3Hostname edits a web3 hostname. +// +// API Reference: https://api.cloudflare.com/#web3-hostname-edit-web3-hostname +func (api *API) UpdateWeb3Hostname(ctx context.Context, params Web3HostnameUpdateParameters) (Web3Hostname, error) { + if params.ZoneID == "" { + return Web3Hostname{}, ErrMissingZoneID + } + if params.Identifier == "" { + return Web3Hostname{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/zones/%s/web3/hostnames/%s", params.ZoneID, params.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return Web3Hostname{}, err + } + var web3Response Web3HostnameResponse + if err := json.Unmarshal(res, &web3Response); err != nil { + return Web3Hostname{}, err + } + return web3Response.Result, nil +} + +// DeleteWeb3Hostname deletes a web3 hostname. +// +// API Reference: https://api.cloudflare.com/#web3-hostname-delete-web3-hostname +func (api *API) DeleteWeb3Hostname(ctx context.Context, params Web3HostnameDetailsParameters) (Web3HostnameDeleteResult, error) { + if params.ZoneID == "" { + return Web3HostnameDeleteResult{}, ErrMissingZoneID + } + if params.Identifier == "" { + return Web3HostnameDeleteResult{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/zones/%s/web3/hostnames/%s", params.ZoneID, params.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return Web3HostnameDeleteResult{}, err + } + var web3Response Web3HostnameDeleteResponse + if err := json.Unmarshal(res, &web3Response); err != nil { + return Web3HostnameDeleteResult{}, err + } + return web3Response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/web_analytics.go b/vendor/github.com/cloudflare/cloudflare-go/web_analytics.go new file mode 100644 index 0000000..9f3041f --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/web_analytics.go @@ -0,0 +1,411 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingWebAnalyticsSiteTag = errors.New("missing required web analytics site ID") + ErrMissingWebAnalyticsRulesetID = errors.New("missing required web analytics ruleset ID") + ErrMissingWebAnalyticsRuleID = errors.New("missing required web analytics rule ID") + ErrMissingWebAnalyticsSiteHost = errors.New("missing required web analytics host or zone_tag") + ErrConflictingWebAnalyticSiteHost = errors.New("conflicting web analytics host and zone_tag, only one must be specified") +) + +// listWebAnalyticsSitesDefaultPageSize represents the default per_pagesize of the API. +var listWebAnalyticsSitesDefaultPageSize = 10 + +// WebAnalyticsSite describes a Web Analytics Site object. 
+type WebAnalyticsSite struct { + SiteTag string `json:"site_tag"` + SiteToken string `json:"site_token"` + Created *time.Time `json:"created,omitempty"` + // Snippet is an encoded JS script to insert into your site HTML. + Snippet string `json:"snippet"` + // AutoInstall defines whether Cloudflare will inject the JS snippet automatically for orange-clouded sites. + AutoInstall bool `json:"auto_install"` + Ruleset WebAnalyticsRuleset `json:"ruleset"` + Rules []WebAnalyticsRule `json:"rules"` +} + +// WebAnalyticsRule describes a Web Analytics Rule object. +type WebAnalyticsRule struct { + ID string `json:"id,omitempty"` + Host string `json:"host"` + Paths []string `json:"paths"` + // Inclusive defines whether the rule includes or excludes the matched traffic from being measured in web analytics. + Inclusive bool `json:"inclusive"` + Created *time.Time `json:"created,omitempty"` + // IsPaused defines whether the rule is paused (inactive) or not. + IsPaused bool `json:"is_paused"` + Priority int `json:"priority,omitempty"` +} + +// CreateWebAnalyticsRule describes the properties required to create or update a Web Analytics Rule object. +type CreateWebAnalyticsRule struct { + ID string `json:"id,omitempty"` + Host string `json:"host"` + Paths []string `json:"paths"` + // Inclusive defines whether the rule includes or excludes the matched traffic from being measured in web analytics. + Inclusive bool `json:"inclusive"` + IsPaused bool `json:"is_paused"` +} + +// WebAnalyticsRuleset describes a Web Analytics Ruleset object. +type WebAnalyticsRuleset struct { + ID string `json:"id"` + ZoneTag string `json:"zone_tag"` + ZoneName string `json:"zone_name"` + Enabled bool `json:"enabled"` +} + +// WebAnalyticsSiteResponse is the API response, containing a single WebAnalyticsSite. +type WebAnalyticsSiteResponse struct { + Response + Result WebAnalyticsSite `json:"result"` +} + +// WebAnalyticsSitesResponse is the API response, containing an array of WebAnalyticsSite. +type WebAnalyticsSitesResponse struct { + Response + ResultInfo ResultInfo `json:"result_info"` + Result []WebAnalyticsSite `json:"result"` +} + +// WebAnalyticsRuleResponse is the API response, containing a single WebAnalyticsRule. +type WebAnalyticsRuleResponse struct { + Response + Result WebAnalyticsRule `json:"result"` +} + +type WebAnalyticsRulesetRules struct { + Ruleset WebAnalyticsRuleset `json:"ruleset"` + Rules []WebAnalyticsRule `json:"rules"` +} + +// WebAnalyticsRulesResponse is the API response, containing a WebAnalyticsRuleset and array of WebAnalyticsRule. +type WebAnalyticsRulesResponse struct { + Response + Result WebAnalyticsRulesetRules `json:"result"` +} + +// WebAnalyticsIDResponse is the API response, containing a single ID. +type WebAnalyticsIDResponse struct { + Response + Result struct { + ID string `json:"id"` + } `json:"result"` +} + +// WebAnalyticsSiteTagResponse is the API response, containing a single ID. +type WebAnalyticsSiteTagResponse struct { + Response + Result struct { + SiteTag string `json:"site_tag"` + } `json:"result"` +} + +type CreateWebAnalyticsSiteParams struct { + // Host is the host to measure traffic for. + Host string `json:"host,omitempty"` + // ZoneTag is the zone tag to measure traffic for. + ZoneTag string `json:"zone_tag,omitempty"` + // AutoInstall defines whether Cloudflare will inject the JS snippet automatically for orange-clouded sites. + AutoInstall *bool `json:"auto_install"` +} + +// CreateWebAnalyticsSite creates a new Web Analytics Site for an Account. 
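+//
+// A minimal sketch, assuming api is an initialized *API, ctx is a
+// context.Context, and accountID is a placeholder account ID passed through an
+// account-level *ResourceContainer (for example via AccountIdentifier):
+//
+//	site, err := api.CreateWebAnalyticsSite(ctx, AccountIdentifier(accountID), CreateWebAnalyticsSiteParams{
+//		Host: "example.com",
+//	})
+//	// site.Snippet then holds the JS snippet to embed manually when auto-install is not used.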
+// +// API reference: https://api.cloudflare.com/#web-analytics-create-site +func (api *API) CreateWebAnalyticsSite(ctx context.Context, rc *ResourceContainer, params CreateWebAnalyticsSiteParams) (*WebAnalyticsSite, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.Host == "" && params.ZoneTag == "" { + return nil, ErrMissingWebAnalyticsSiteHost + } + if params.Host != "" && params.ZoneTag != "" { + return nil, ErrConflictingWebAnalyticSiteHost + } + if params.AutoInstall == nil { + // default auto_install to true for orange-clouded zones (zone_tag is specified) + params.AutoInstall = BoolPtr(params.ZoneTag != "") + } + uri := fmt.Sprintf("/accounts/%s/rum/site_info", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return nil, err + } + var r WebAnalyticsSiteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type ListWebAnalyticsSitesParams struct { + ResultInfo + // Property to order Sites by, "host" or "created". + OrderBy string `url:"order_by,omitempty"` +} + +// ListWebAnalyticsSites returns all Web Analytics Sites of an Account. +// +// API reference: https://api.cloudflare.com/#web-analytics-list-sites +func (api *API) ListWebAnalyticsSites(ctx context.Context, rc *ResourceContainer, params ListWebAnalyticsSitesParams) ([]WebAnalyticsSite, *ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return nil, nil, ErrRequiredAccountLevelResourceContainer + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = listWebAnalyticsSitesDefaultPageSize + } + + if params.Page < 1 { + params.Page = 1 + } + + var sites []WebAnalyticsSite + var lastResultInfo ResultInfo + + for { + uri := buildURI(fmt.Sprintf("/accounts/%s/rum/site_info/list", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, nil, err + } + var r WebAnalyticsSitesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + sites = append(sites, r.Result...) + lastResultInfo = r.ResultInfo + params.ResultInfo = r.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + return sites, &lastResultInfo, nil +} + +type GetWebAnalyticsSiteParams struct { + SiteTag string +} + +// GetWebAnalyticsSite fetches detail about one Web Analytics Site for an Account. +// +// API reference: https://api.cloudflare.com/#web-analytics-get-site +func (api *API) GetWebAnalyticsSite(ctx context.Context, rc *ResourceContainer, params GetWebAnalyticsSiteParams) (*WebAnalyticsSite, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.SiteTag == "" { + return nil, ErrMissingWebAnalyticsSiteTag + } + uri := fmt.Sprintf("/accounts/%s/rum/site_info/%s", rc.Identifier, params.SiteTag) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r WebAnalyticsSiteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type UpdateWebAnalyticsSiteParams struct { + SiteTag string `json:"-"` + // Host is the host to measure traffic for. 
+ Host string `json:"host,omitempty"` + // ZoneTag is the zone tag to measure traffic for. + ZoneTag string `json:"zone_tag,omitempty"` + // AutoInstall defines whether Cloudflare will inject the JS snippet automatically for orange-clouded sites. + AutoInstall *bool `json:"auto_install"` +} + +// UpdateWebAnalyticsSite updates an existing Web Analytics Site for an Account. +// +// API reference: https://api.cloudflare.com/#web-analytics-update-site +func (api *API) UpdateWebAnalyticsSite(ctx context.Context, rc *ResourceContainer, params UpdateWebAnalyticsSiteParams) (*WebAnalyticsSite, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.SiteTag == "" { + return nil, ErrMissingWebAnalyticsSiteTag + } + if params.AutoInstall == nil { + // default auto_install to true for orange-clouded zones (zone_tag is specified) + params.AutoInstall = BoolPtr(params.ZoneTag != "") + } + uri := fmt.Sprintf("/accounts/%s/rum/site_info/%s", rc.Identifier, params.SiteTag) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return nil, err + } + var r WebAnalyticsSiteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type DeleteWebAnalyticsSiteParams struct { + SiteTag string +} + +// DeleteWebAnalyticsSite deletes an existing Web Analytics Site for an Account. +// +// API reference: https://api.cloudflare.com/#web-analytics-delete-site +func (api *API) DeleteWebAnalyticsSite(ctx context.Context, rc *ResourceContainer, params DeleteWebAnalyticsSiteParams) (*string, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.SiteTag == "" { + return nil, ErrMissingWebAnalyticsSiteTag + } + uri := fmt.Sprintf("/accounts/%s/rum/site_info/%s", rc.Identifier, params.SiteTag) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + var r WebAnalyticsSiteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result.SiteTag, nil +} + +type CreateWebAnalyticsRuleParams struct { + RulesetID string + Rule CreateWebAnalyticsRule +} + +// CreateWebAnalyticsRule creates a new Web Analytics Rule in a Web Analytics ruleset. +// +// API reference: https://api.cloudflare.com/#web-analytics-create-rule +func (api *API) CreateWebAnalyticsRule(ctx context.Context, rc *ResourceContainer, params CreateWebAnalyticsRuleParams) (*WebAnalyticsRule, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.RulesetID == "" { + return nil, ErrMissingWebAnalyticsRulesetID + } + uri := fmt.Sprintf("/accounts/%s/rum/v2/%s/rule", rc.Identifier, params.RulesetID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Rule) + if err != nil { + return nil, err + } + var r WebAnalyticsRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type ListWebAnalyticsRulesParams struct { + RulesetID string +} + +// ListWebAnalyticsRules fetches all Web Analytics Rules in a Web Analytics ruleset. 
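+//
+// A minimal sketch, assuming api, ctx, accountID and rulesetID are an
+// initialized *API, a context.Context and placeholder identifiers:
+//
+//	result, err := api.ListWebAnalyticsRules(ctx, AccountIdentifier(accountID), ListWebAnalyticsRulesParams{
+//		RulesetID: rulesetID,
+//	})
+//	// result.Rules then holds the individual rules, result.Ruleset the ruleset metadata.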
+// +// API reference: https://api.cloudflare.com/#web-analytics-list-rules +func (api *API) ListWebAnalyticsRules(ctx context.Context, rc *ResourceContainer, params ListWebAnalyticsRulesParams) (*WebAnalyticsRulesetRules, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.RulesetID == "" { + return nil, ErrMissingWebAnalyticsRulesetID + } + uri := fmt.Sprintf("/accounts/%s/rum/v2/%s/rules", rc.Identifier, params.RulesetID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r WebAnalyticsRulesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} + +type DeleteWebAnalyticsRuleParams struct { + RulesetID string + RuleID string +} + +// DeleteWebAnalyticsRule deletes an existing Web Analytics Rule from a Web Analytics ruleset. +// +// API reference: https://api.cloudflare.com/#web-analytics-delete-rule +func (api *API) DeleteWebAnalyticsRule(ctx context.Context, rc *ResourceContainer, params DeleteWebAnalyticsRuleParams) (*string, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.RulesetID == "" { + return nil, ErrMissingWebAnalyticsRulesetID + } + if params.RuleID == "" { + return nil, ErrMissingWebAnalyticsRuleID + } + uri := fmt.Sprintf("/accounts/%s/rum/v2/%s/rule/%s", rc.Identifier, params.RulesetID, params.RuleID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return nil, err + } + var r WebAnalyticsIDResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result.ID, nil +} + +type UpdateWebAnalyticsRuleParams struct { + RulesetID string + RuleID string + Rule CreateWebAnalyticsRule +} + +// UpdateWebAnalyticsRule updates a Web Analytics Rule in a Web Analytics ruleset. +// +// API reference: https://api.cloudflare.com/#web-analytics-update-rule +func (api *API) UpdateWebAnalyticsRule(ctx context.Context, rc *ResourceContainer, params UpdateWebAnalyticsRuleParams) (*WebAnalyticsRule, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + if params.RulesetID == "" { + return nil, ErrMissingWebAnalyticsRulesetID + } + if params.RuleID == "" { + return nil, ErrMissingWebAnalyticsRuleID + } + uri := fmt.Sprintf("/accounts/%s/rum/v2/%s/rule/%s", rc.Identifier, params.RulesetID, params.RuleID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Rule) + if err != nil { + return nil, err + } + var r WebAnalyticsRuleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return &r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers.go b/vendor/github.com/cloudflare/cloudflare-go/workers.go new file mode 100644 index 0000000..2ffc492 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers.go @@ -0,0 +1,631 @@ +package cloudflare + +import ( + "bytes" + "context" + "fmt" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strings" + "time" + + "github.com/goccy/go-json" +) + +// WorkerRequestParams provides parameters for worker requests for both enterprise and standard requests. 
+type WorkerRequestParams struct { + ZoneID string + ScriptName string +} + +type CreateWorkerParams struct { + ScriptName string + Script string + + // DispatchNamespaceName uploads the worker to a WFP dispatch namespace if provided + DispatchNamespaceName *string + + // Module changes the Content-Type header to specify the script is an + // ES Module syntax script. + Module bool + + // Logpush opts the worker into Workers Logpush logging. A nil value leaves + // the current setting unchanged. + // + // Documentation: https://developers.cloudflare.com/workers/platform/logpush/ + Logpush *bool + + // TailConsumers specifies a list of Workers that will consume the logs of + // the attached Worker. + // Documentation: https://developers.cloudflare.com/workers/platform/tail-workers/ + TailConsumers *[]WorkersTailConsumer + + // Bindings should be a map where the keys are the binding name, and the + // values are the binding content + Bindings map[string]WorkerBinding + + // CompatibilityDate is a date in the form yyyy-mm-dd, + // which will be used to determine which version of the Workers runtime is used. + // https://developers.cloudflare.com/workers/platform/compatibility-dates/ + CompatibilityDate string + + // CompatibilityFlags are the names of features of the Workers runtime to be enabled or disabled, + // usually used together with CompatibilityDate. + // https://developers.cloudflare.com/workers/platform/compatibility-dates/#compatibility-flags + CompatibilityFlags []string + + Placement *Placement + + // Tags are used to better manage CRUD operations at scale. + // https://developers.cloudflare.com/cloudflare-for-platforms/workers-for-platforms/platform/tags/ + Tags []string +} + +func (p CreateWorkerParams) RequiresMultipart() bool { + switch { + case p.Module: + return true + case p.Logpush != nil: + return true + case p.Placement != nil: + return true + case len(p.Bindings) > 0: + return true + case p.CompatibilityDate != "": + return true + case len(p.CompatibilityFlags) > 0: + return true + case p.TailConsumers != nil: + return true + case len(p.Tags) > 0: + return true + } + + return false +} + +type UpdateWorkersScriptContentParams struct { + ScriptName string + Script string + + // DispatchNamespaceName uploads the worker to a WFP dispatch namespace if provided + DispatchNamespaceName *string + + // Module changes the Content-Type header to specify the script is an + // ES Module syntax script. + Module bool +} + +type UpdateWorkersScriptSettingsParams struct { + ScriptName string + + // Logpush opts the worker into Workers Logpush logging. A nil value leaves + // the current setting unchanged. + // + // Documentation: https://developers.cloudflare.com/workers/platform/logpush/ + Logpush *bool + + // TailConsumers specifies a list of Workers that will consume the logs of + // the attached Worker. + // Documentation: https://developers.cloudflare.com/workers/platform/tail-workers/ + TailConsumers *[]WorkersTailConsumer + + // Bindings should be a map where the keys are the binding name, and the + // values are the binding content + Bindings map[string]WorkerBinding + + // CompatibilityDate is a date in the form yyyy-mm-dd, + // which will be used to determine which version of the Workers runtime is used. + // https://developers.cloudflare.com/workers/platform/compatibility-dates/ + CompatibilityDate string + + // CompatibilityFlags are the names of features of the Workers runtime to be enabled or disabled, + // usually used together with CompatibilityDate. 
+ // https://developers.cloudflare.com/workers/platform/compatibility-dates/#compatibility-flags + CompatibilityFlags []string + + Placement *Placement +} + +// WorkerScriptParams provides a worker script and the associated bindings. +type WorkerScriptParams struct { + ScriptName string + + // Module changes the Content-Type header to specify the script is an + // ES Module syntax script. + Module bool + + // Bindings should be a map where the keys are the binding name, and the + // values are the binding content + Bindings map[string]WorkerBinding +} + +// WorkerRoute is used to map traffic matching a URL pattern to a workers +// +// API reference: https://api.cloudflare.com/#worker-routes-properties +type WorkerRoute struct { + ID string `json:"id,omitempty"` + Pattern string `json:"pattern"` + ScriptName string `json:"script,omitempty"` +} + +// WorkerRoutesResponse embeds Response struct and slice of WorkerRoutes. +type WorkerRoutesResponse struct { + Response + Routes []WorkerRoute `json:"result"` +} + +// WorkerRouteResponse embeds Response struct and a single WorkerRoute. +type WorkerRouteResponse struct { + Response + WorkerRoute `json:"result"` +} + +// WorkerScript Cloudflare Worker struct with metadata. +type WorkerScript struct { + WorkerMetaData + Script string `json:"script"` + UsageModel string `json:"usage_model,omitempty"` +} + +type WorkersTailConsumer struct { + Service string `json:"service"` + Environment *string `json:"environment,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// WorkerMetaData contains worker script information such as size, creation & modification dates. +type WorkerMetaData struct { + ID string `json:"id,omitempty"` + ETAG string `json:"etag,omitempty"` + Size int `json:"size,omitempty"` + CreatedOn time.Time `json:"created_on,omitempty"` + ModifiedOn time.Time `json:"modified_on,omitempty"` + Logpush *bool `json:"logpush,omitempty"` + TailConsumers *[]WorkersTailConsumer `json:"tail_consumers,omitempty"` + LastDeployedFrom *string `json:"last_deployed_from,omitempty"` + DeploymentId *string `json:"deployment_id,omitempty"` + PlacementMode *PlacementMode `json:"placement_mode,omitempty"` + PipelineHash *string `json:"pipeline_hash,omitempty"` +} + +// WorkerListResponse wrapper struct for API response to worker script list API call. +type WorkerListResponse struct { + Response + ResultInfo + WorkerList []WorkerMetaData `json:"result"` +} + +// WorkerScriptResponse wrapper struct for API response to worker script calls. +type WorkerScriptResponse struct { + Response + Module bool + WorkerScript `json:"result"` +} + +// WorkerScriptSettingsResponse wrapper struct for API response to worker script settings calls. +type WorkerScriptSettingsResponse struct { + Response + WorkerMetaData +} + +type ListWorkersParams struct{} + +type DeleteWorkerParams struct { + ScriptName string + + // DispatchNamespaceName is the dispatch namespace the Worker is uploaded to. + DispatchNamespace *string +} + +type PlacementMode string + +const ( + PlacementModeOff PlacementMode = "" + PlacementModeSmart PlacementMode = "smart" +) + +type Placement struct { + Mode PlacementMode `json:"mode"` +} + +// DeleteWorker deletes a single Worker. 
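+//
+// A minimal sketch, assuming api is an initialized *API, ctx is a
+// context.Context, accountID is a placeholder account ID, and the script name
+// is a placeholder:
+//
+//	err := api.DeleteWorker(ctx, AccountIdentifier(accountID), DeleteWorkerParams{
+//		ScriptName: "my-worker",
+//	})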
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-delete-worker +func (api *API) DeleteWorker(ctx context.Context, rc *ResourceContainer, params DeleteWorkerParams) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s", rc.Identifier, params.ScriptName) + if params.DispatchNamespace != nil && *params.DispatchNamespace != "" { + uri = fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces/%s/scripts/%s", rc.Identifier, *params.DispatchNamespace, params.ScriptName) + } + + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + var r WorkerScriptResponse + if err != nil { + return err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} + +// GetWorker fetch raw script content for your worker returns string containing +// worker code js. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-download-worker +func (api *API) GetWorker(ctx context.Context, rc *ResourceContainer, scriptName string) (WorkerScriptResponse, error) { + return api.GetWorkerWithDispatchNamespace(ctx, rc, scriptName, "") +} + +// GetWorker fetch raw script content for your worker returns string containing +// worker code js. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-download-worker +func (api *API) GetWorkerWithDispatchNamespace(ctx context.Context, rc *ResourceContainer, scriptName string, dispatchNamespace string) (WorkerScriptResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkerScriptResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerScriptResponse{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s", rc.Identifier, scriptName) + if dispatchNamespace != "" { + uri = fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces/%s/scripts/%s/content", rc.Identifier, dispatchNamespace, scriptName) + } + res, err := api.makeRequestContextWithHeadersComplete(ctx, http.MethodGet, uri, nil, nil) + var r WorkerScriptResponse + if err != nil { + return r, err + } + + // Check if the response type is multipart, in which case this was a module worker + mediaType, mediaParams, _ := mime.ParseMediaType(res.Headers.Get("content-type")) + if strings.HasPrefix(mediaType, "multipart/") { + bytesReader := bytes.NewReader(res.Body) + mimeReader := multipart.NewReader(bytesReader, mediaParams["boundary"]) + mimePart, err := mimeReader.NextPart() + if err != nil { + return r, fmt.Errorf("could not get multipart response body: %w", err) + } + mimePartBody, err := io.ReadAll(mimePart) + if err != nil { + return r, fmt.Errorf("could not read multipart response body: %w", err) + } + r.Script = string(mimePartBody) + r.Module = true + } else { + r.Script = string(res.Body) + r.Module = false + } + + r.Success = true + return r, nil +} + +// ListWorkers returns list of Workers for given account. 
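+//
+// A minimal sketch, assuming api is an initialized *API, ctx is a
+// context.Context, and accountID is a placeholder account ID:
+//
+//	resp, _, err := api.ListWorkers(ctx, AccountIdentifier(accountID), ListWorkersParams{})
+//	if err == nil {
+//		for _, w := range resp.WorkerList {
+//			fmt.Println(w.ID, w.ModifiedOn)
+//		}
+//	}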
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-list-workers +func (api *API) ListWorkers(ctx context.Context, rc *ResourceContainer, params ListWorkersParams) (WorkerListResponse, *ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return WorkerListResponse{}, &ResultInfo{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerListResponse{}, &ResultInfo{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WorkerListResponse{}, &ResultInfo{}, err + } + + var r WorkerListResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WorkerListResponse{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r, &r.ResultInfo, nil +} + +// UploadWorker pushes raw script content for your Worker. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-upload-worker-module +func (api *API) UploadWorker(ctx context.Context, rc *ResourceContainer, params CreateWorkerParams) (WorkerScriptResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkerScriptResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerScriptResponse{}, ErrMissingAccountID + } + + body := []byte(params.Script) + var ( + contentType = "application/javascript" + err error + ) + + if params.RequiresMultipart() { + contentType, body, err = formatMultipartBody(params) + if err != nil { + return WorkerScriptResponse{}, err + } + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s", rc.Identifier, params.ScriptName) + if params.DispatchNamespaceName != nil && *params.DispatchNamespaceName != "" { + uri = fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces/%s/scripts/%s", rc.Identifier, *params.DispatchNamespaceName, params.ScriptName) + } + + headers := make(http.Header) + headers.Set("Content-Type", contentType) + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPut, uri, body, headers) + + var r WorkerScriptResponse + if err != nil { + return r, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return r, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r, nil +} + +// GetWorkersScriptContent returns the pure script content of a worker. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-get-content +func (api *API) GetWorkersScriptContent(ctx context.Context, rc *ResourceContainer, scriptName string) (string, error) { + if rc.Level != AccountRouteLevel { + return "", ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return "", ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/content/v2", rc.Identifier, scriptName) + res, err := api.makeRequestContextWithHeadersComplete(ctx, http.MethodGet, uri, nil, nil) + if err != nil { + return "", err + } + + return string(res.Body), nil +} + +// UpdateWorkersScriptContent pushes only script content, no metadata. 
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-put-content +func (api *API) UpdateWorkersScriptContent(ctx context.Context, rc *ResourceContainer, params UpdateWorkersScriptContentParams) (WorkerScriptResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkerScriptResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerScriptResponse{}, ErrMissingAccountID + } + + body := []byte(params.Script) + var ( + contentType = "application/javascript" + err error + ) + + if params.Module { + var formattedParams CreateWorkerParams + formattedParams.Script = params.Script + formattedParams.ScriptName = params.ScriptName + formattedParams.Module = params.Module + formattedParams.DispatchNamespaceName = params.DispatchNamespaceName + contentType, body, err = formatMultipartBody(formattedParams) + if err != nil { + return WorkerScriptResponse{}, err + } + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/content", rc.Identifier, params.ScriptName) + if params.DispatchNamespaceName != nil { + uri = fmt.Sprintf("/accounts/%s/workers/dispatch_namespaces/%s/scripts/%s/content", rc.Identifier, *params.DispatchNamespaceName, params.ScriptName) + } + + headers := make(http.Header) + headers.Set("Content-Type", contentType) + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPut, uri, body, headers) + + var r WorkerScriptResponse + if err != nil { + return r, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return r, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r, nil +} + +// GetWorkersScriptSettings returns the metadata of a worker. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-get-settings +func (api *API) GetWorkersScriptSettings(ctx context.Context, rc *ResourceContainer, scriptName string) (WorkerScriptSettingsResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkerScriptSettingsResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerScriptSettingsResponse{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/settings", rc.Identifier, scriptName) + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodGet, uri, nil, nil) + var r WorkerScriptSettingsResponse + if err != nil { + return r, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return r, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + r.Success = true + + return r, nil +} + +// UpdateWorkersScriptSettings pushes only script metadata. 
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-script-patch-settings +func (api *API) UpdateWorkersScriptSettings(ctx context.Context, rc *ResourceContainer, params UpdateWorkersScriptSettingsParams) (WorkerScriptSettingsResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkerScriptSettingsResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerScriptSettingsResponse{}, ErrMissingAccountID + } + + body, err := json.Marshal(params) + if err != nil { + return WorkerScriptSettingsResponse{}, err + } + headers := make(http.Header) + headers.Set("Content-Type", "application/json") + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/settings", rc.Identifier, params.ScriptName) + res, err := api.makeRequestContextWithHeaders(ctx, http.MethodPatch, uri, body, headers) + var r WorkerScriptSettingsResponse + if err != nil { + return r, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return r, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + r.Success = true + + return r, nil +} + +// Returns content-type, body, error. +func formatMultipartBody(params CreateWorkerParams) (string, []byte, error) { + var buf = &bytes.Buffer{} + var mpw = multipart.NewWriter(buf) + defer mpw.Close() + + // Write metadata part + var scriptPartName string + meta := struct { + BodyPart string `json:"body_part,omitempty"` + MainModule string `json:"main_module,omitempty"` + Bindings []workerBindingMeta `json:"bindings"` + Logpush *bool `json:"logpush,omitempty"` + TailConsumers *[]WorkersTailConsumer `json:"tail_consumers,omitempty"` + CompatibilityDate string `json:"compatibility_date,omitempty"` + CompatibilityFlags []string `json:"compatibility_flags,omitempty"` + Placement *Placement `json:"placement,omitempty"` + Tags []string `json:"tags"` + }{ + Bindings: make([]workerBindingMeta, 0, len(params.Bindings)), + Logpush: params.Logpush, + TailConsumers: params.TailConsumers, + CompatibilityDate: params.CompatibilityDate, + CompatibilityFlags: params.CompatibilityFlags, + Placement: params.Placement, + Tags: params.Tags, + } + + if params.Module { + scriptPartName = "worker.mjs" + meta.MainModule = scriptPartName + } else { + scriptPartName = "script" + meta.BodyPart = scriptPartName + } + + bodyWriters := make([]workerBindingBodyWriter, 0, len(params.Bindings)) + for name, b := range params.Bindings { + bindingMeta, bodyWriter, err := b.serialize(name) + if err != nil { + return "", nil, err + } + + meta.Bindings = append(meta.Bindings, bindingMeta) + bodyWriters = append(bodyWriters, bodyWriter) + } + + var hdr = textproto.MIMEHeader{} + hdr.Set("content-disposition", fmt.Sprintf(`form-data; name="%s"`, "metadata")) + hdr.Set("content-type", "application/json") + pw, err := mpw.CreatePart(hdr) + if err != nil { + return "", nil, err + } + metaJSON, err := json.Marshal(meta) + if err != nil { + return "", nil, err + } + _, err = pw.Write(metaJSON) + if err != nil { + return "", nil, err + } + + // Write script part + hdr = textproto.MIMEHeader{} + + contentType := "application/javascript" + if params.Module { + contentType = "application/javascript+module" + hdr.Set("content-disposition", fmt.Sprintf(`form-data; name="%s"; filename="%[1]s"`, scriptPartName)) + } else { + hdr.Set("content-disposition", fmt.Sprintf(`form-data; name="%s"`, scriptPartName)) + } + hdr.Set("content-type", contentType) + + pw, err = mpw.CreatePart(hdr) + if err != nil { + return "", nil, err + } + _, err = 
pw.Write([]byte(params.Script)) + if err != nil { + return "", nil, err + } + + // Write other bindings with parts + for _, w := range bodyWriters { + if w != nil { + err = w(mpw) + if err != nil { + return "", nil, err + } + } + } + + mpw.Close() + + return mpw.FormDataContentType(), buf.Bytes(), nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_account_settings.go b/vendor/github.com/cloudflare/cloudflare-go/workers_account_settings.go new file mode 100644 index 0000000..55e0d30 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_account_settings.go @@ -0,0 +1,83 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +type WorkersAccountSettings struct { + DefaultUsageModel string `json:"default_usage_model,omitempty"` + GreenCompute bool `json:"green_compute,omitempty"` +} + +type CreateWorkersAccountSettingsParameters struct { + DefaultUsageModel string `json:"default_usage_model,omitempty"` + GreenCompute bool `json:"green_compute,omitempty"` +} + +type CreateWorkersAccountSettingsResponse struct { + Response + Result WorkersAccountSettings +} + +type WorkersAccountSettingsParameters struct{} + +type WorkersAccountSettingsResponse struct { + Response + Result WorkersAccountSettings +} + +// CreateWorkersAccountSettings sets the account settings for Workers. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-account-settings-create-worker-account-settings +func (api *API) CreateWorkersAccountSettings(ctx context.Context, rc *ResourceContainer, params CreateWorkersAccountSettingsParameters) (WorkersAccountSettings, error) { + if rc.Identifier == "" { + return WorkersAccountSettings{}, ErrMissingAccountID + } + + if rc.Level != AccountRouteLevel { + return WorkersAccountSettings{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/workers/account-settings", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return WorkersAccountSettings{}, err + } + + var workersAccountSettingsResponse CreateWorkersAccountSettingsResponse + if err := json.Unmarshal(res, &workersAccountSettingsResponse); err != nil { + return WorkersAccountSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return workersAccountSettingsResponse.Result, nil +} + +// WorkersAccountSettings returns the current account settings for Workers. 
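+// Illustrative usage (a sketch):
+//
+//	settings, err := api.WorkersAccountSettings(ctx, AccountIdentifier(accountID), WorkersAccountSettingsParameters{})
+//	if err == nil {
+//		fmt.Println(settings.DefaultUsageModel, settings.GreenCompute)
+//	}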
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-account-settings-fetch-worker-account-settings +func (api *API) WorkersAccountSettings(ctx context.Context, rc *ResourceContainer, params WorkersAccountSettingsParameters) (WorkersAccountSettings, error) { + if rc.Identifier == "" { + return WorkersAccountSettings{}, ErrMissingAccountID + } + + if rc.Level != AccountRouteLevel { + return WorkersAccountSettings{}, ErrRequiredAccountLevelResourceContainer + } + + uri := fmt.Sprintf("/accounts/%s/workers/account-settings", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, params) + if err != nil { + return WorkersAccountSettings{}, err + } + + var workersAccountSettingsResponse CreateWorkersAccountSettingsResponse + if err := json.Unmarshal(res, &workersAccountSettingsResponse); err != nil { + return WorkersAccountSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return workersAccountSettingsResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_bindings.go b/vendor/github.com/cloudflare/cloudflare-go/workers_bindings.go new file mode 100644 index 0000000..04916ce --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_bindings.go @@ -0,0 +1,651 @@ +package cloudflare + +import ( + "context" + rand "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + + "github.com/goccy/go-json" +) + +// WorkerBindingType represents a particular type of binding. +type WorkerBindingType string + +func (b WorkerBindingType) String() string { + return string(b) +} + +const ( + // WorkerDurableObjectBindingType is the type for Durable Object bindings. + WorkerDurableObjectBindingType WorkerBindingType = "durable_object_namespace" + // WorkerInheritBindingType is the type for inherited bindings. + WorkerInheritBindingType WorkerBindingType = "inherit" + // WorkerKvNamespaceBindingType is the type for KV Namespace bindings. + WorkerKvNamespaceBindingType WorkerBindingType = "kv_namespace" + // WorkerWebAssemblyBindingType is the type for Web Assembly module bindings. + WorkerWebAssemblyBindingType WorkerBindingType = "wasm_module" + // WorkerSecretTextBindingType is the type for secret text bindings. + WorkerSecretTextBindingType WorkerBindingType = "secret_text" + // WorkerPlainTextBindingType is the type for plain text bindings. + WorkerPlainTextBindingType WorkerBindingType = "plain_text" + // WorkerServiceBindingType is the type for service bindings. + WorkerServiceBindingType WorkerBindingType = "service" + // WorkerR2BucketBindingType is the type for R2 bucket bindings. + WorkerR2BucketBindingType WorkerBindingType = "r2_bucket" + // WorkerAnalyticsEngineBindingType is the type for Analytics Engine dataset bindings. + WorkerAnalyticsEngineBindingType WorkerBindingType = "analytics_engine" + // WorkerQueueBindingType is the type for queue bindings. + WorkerQueueBindingType WorkerBindingType = "queue" + // DispatchNamespaceBindingType is the type for WFP namespace bindings. + DispatchNamespaceBindingType WorkerBindingType = "dispatch_namespace" + // WorkerD1DataseBindingType is for D1 databases. + WorkerD1DataseBindingType WorkerBindingType = "d1" + // WorkerHyperdriveBindingType is for Hyperdrive config bindings. 
+ WorkerHyperdriveBindingType WorkerBindingType = "hyperdrive" +) + +type ListWorkerBindingsParams struct { + ScriptName string + DispatchNamespace *string +} + +// WorkerBindingListItem a struct representing an individual binding in a list of bindings. +type WorkerBindingListItem struct { + Name string `json:"name"` + Binding WorkerBinding +} + +// WorkerBindingListResponse wrapper struct for API response to worker binding list API call. +type WorkerBindingListResponse struct { + Response + BindingList []WorkerBindingListItem +} + +// Workers supports multiple types of bindings, e.g. KV namespaces or WebAssembly modules, and each type +// of binding will be represented differently in the upload request body. At a high-level, every binding +// will specify metadata, which is a JSON object with the properties "name" and "type". Some types of bindings +// will also have additional metadata properties. For example, KV bindings also specify the KV namespace. +// In addition to the metadata, some binding types may need to include additional data as part of the +// multipart form. For example, WebAssembly bindings will include the contents of the WebAssembly module. + +// WorkerBinding is the generic interface implemented by all of +// the various binding types. +type WorkerBinding interface { + Type() WorkerBindingType + + // serialize is responsible for returning the binding metadata as well as an optionally + // returning a function that can modify the multipart form body. For example, this is used + // by WebAssembly bindings to add a new part containing the WebAssembly module contents. + serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) +} + +// workerBindingMeta is the metadata portion of the binding. +type workerBindingMeta = map[string]interface{} + +// workerBindingBodyWriter allows for a binding to add additional parts to the multipart body. +type workerBindingBodyWriter func(*multipart.Writer) error + +// WorkerInheritBinding will just persist whatever binding content was previously uploaded. +type WorkerInheritBinding struct { + // Optional parameter that allows for renaming a binding without changing + // its contents. If `OldName` is empty, the binding name will not be changed. + OldName string +} + +// Type returns the type of the binding. +func (b WorkerInheritBinding) Type() WorkerBindingType { + return WorkerInheritBindingType +} + +func (b WorkerInheritBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + meta := workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + } + + if b.OldName != "" { + meta["old_name"] = b.OldName + } + + return meta, nil, nil +} + +// WorkerKvNamespaceBinding is a binding to a Workers KV Namespace. +// +// https://developers.cloudflare.com/workers/archive/api/resource-bindings/kv-namespaces/ +type WorkerKvNamespaceBinding struct { + NamespaceID string +} + +// Type returns the type of the binding. +func (b WorkerKvNamespaceBinding) Type() WorkerBindingType { + return WorkerKvNamespaceBindingType +} + +func (b WorkerKvNamespaceBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.NamespaceID == "" { + return nil, nil, fmt.Errorf(`namespace ID for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "namespace_id": b.NamespaceID, + }, nil, nil +} + +// WorkerDurableObjectBinding is a binding to a Workers Durable Object. 
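+// Illustrative usage as part of a script upload (a sketch; the script name,
+// workerJS body, and binding name are hypothetical placeholders):
+//
+//	params := CreateWorkerParams{
+//		ScriptName: "my-worker",
+//		Script:     workerJS,
+//		Module:     true,
+//		Bindings: map[string]WorkerBinding{
+//			"COUNTER": WorkerDurableObjectBinding{ClassName: "Counter", ScriptName: "my-worker"},
+//		},
+//	}
+//	_, err := api.UploadWorker(ctx, AccountIdentifier(accountID), params)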
+// +// https://api.cloudflare.com/#durable-objects-namespace-properties +type WorkerDurableObjectBinding struct { + ClassName string + ScriptName string +} + +// Type returns the type of the binding. +func (b WorkerDurableObjectBinding) Type() WorkerBindingType { + return WorkerDurableObjectBindingType +} + +func (b WorkerDurableObjectBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.ClassName == "" { + return nil, nil, fmt.Errorf(`ClassName for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "class_name": b.ClassName, + "script_name": b.ScriptName, + }, nil, nil +} + +// WorkerWebAssemblyBinding is a binding to a WebAssembly module. +// +// https://developers.cloudflare.com/workers/archive/api/resource-bindings/webassembly-modules/ +type WorkerWebAssemblyBinding struct { + Module io.Reader +} + +// Type returns the type of the binding. +func (b WorkerWebAssemblyBinding) Type() WorkerBindingType { + return WorkerWebAssemblyBindingType +} + +func (b WorkerWebAssemblyBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + partName := getRandomPartName() + + bodyWriter := func(mpw *multipart.Writer) error { + var hdr = textproto.MIMEHeader{} + hdr.Set("content-disposition", fmt.Sprintf(`form-data; name="%s"`, partName)) + hdr.Set("content-type", "application/wasm") + pw, err := mpw.CreatePart(hdr) + if err != nil { + return err + } + _, err = io.Copy(pw, b.Module) + return err + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "part": partName, + }, bodyWriter, nil +} + +// WorkerPlainTextBinding is a binding to plain text. +// +// https://developers.cloudflare.com/workers/tooling/api/scripts/#add-a-plain-text-binding +type WorkerPlainTextBinding struct { + Text string +} + +// Type returns the type of the binding. +func (b WorkerPlainTextBinding) Type() WorkerBindingType { + return WorkerPlainTextBindingType +} + +func (b WorkerPlainTextBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Text == "" { + return nil, nil, fmt.Errorf(`text for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "text": b.Text, + }, nil, nil +} + +// WorkerSecretTextBinding is a binding to secret text. +// +// https://developers.cloudflare.com/workers/tooling/api/scripts/#add-a-secret-text-binding +type WorkerSecretTextBinding struct { + Text string +} + +// Type returns the type of the binding. 
+func (b WorkerSecretTextBinding) Type() WorkerBindingType { + return WorkerSecretTextBindingType +} + +func (b WorkerSecretTextBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Text == "" { + return nil, nil, fmt.Errorf(`text for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "text": b.Text, + }, nil, nil +} + +type WorkerServiceBinding struct { + Service string + Environment *string +} + +func (b WorkerServiceBinding) Type() WorkerBindingType { + return WorkerServiceBindingType +} + +func (b WorkerServiceBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Service == "" { + return nil, nil, fmt.Errorf(`service for binding "%s" cannot be empty`, bindingName) + } + + meta := workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "service": b.Service, + } + + if b.Environment != nil { + meta["environment"] = *b.Environment + } + + return meta, nil, nil +} + +// WorkerR2BucketBinding is a binding to an R2 bucket. +type WorkerR2BucketBinding struct { + BucketName string +} + +// Type returns the type of the binding. +func (b WorkerR2BucketBinding) Type() WorkerBindingType { + return WorkerR2BucketBindingType +} + +func (b WorkerR2BucketBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.BucketName == "" { + return nil, nil, fmt.Errorf(`BucketName for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "bucket_name": b.BucketName, + }, nil, nil +} + +// WorkerAnalyticsEngineBinding is a binding to an Analytics Engine dataset. +type WorkerAnalyticsEngineBinding struct { + Dataset string +} + +// Type returns the type of the binding. +func (b WorkerAnalyticsEngineBinding) Type() WorkerBindingType { + return WorkerAnalyticsEngineBindingType +} + +func (b WorkerAnalyticsEngineBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Dataset == "" { + return nil, nil, fmt.Errorf(`dataset for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "dataset": b.Dataset, + }, nil, nil +} + +// WorkerQueueBinding is a binding to a Workers Queue. +// +// https://developers.cloudflare.com/workers/platform/bindings/#queue-bindings +type WorkerQueueBinding struct { + Binding string + Queue string +} + +// Type returns the type of the binding. 
+func (b WorkerQueueBinding) Type() WorkerBindingType { + return WorkerQueueBindingType +} + +func (b WorkerQueueBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Binding == "" { + return nil, nil, fmt.Errorf(`binding name for binding "%s" cannot be empty`, bindingName) + } + if b.Queue == "" { + return nil, nil, fmt.Errorf(`queue name for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "type": b.Type(), + "name": b.Binding, + "queue_name": b.Queue, + }, nil, nil +} + +// DispatchNamespaceBinding is a binding to a Workers for Platforms namespace +// +// https://developers.cloudflare.com/workers/platform/bindings/#dispatch-namespace-bindings-workers-for-platforms +type DispatchNamespaceBinding struct { + Binding string + Namespace string + Outbound *NamespaceOutboundOptions +} + +type NamespaceOutboundOptions struct { + Worker WorkerReference + Params []OutboundParamSchema +} + +type WorkerReference struct { + Service string + Environment *string +} + +type OutboundParamSchema struct { + Name string +} + +// Type returns the type of the binding. +func (b DispatchNamespaceBinding) Type() WorkerBindingType { + return DispatchNamespaceBindingType +} + +func (b DispatchNamespaceBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Binding == "" { + return nil, nil, fmt.Errorf(`binding name for binding "%s" cannot be empty`, bindingName) + } + if b.Namespace == "" { + return nil, nil, fmt.Errorf(`namespace name for binding "%s" cannot be empty`, bindingName) + } + + meta := workerBindingMeta{ + "type": b.Type(), + "name": b.Binding, + "namespace": b.Namespace, + } + + if b.Outbound != nil { + if b.Outbound.Worker.Service == "" { + return nil, nil, fmt.Errorf(`outbound options for binding "%s" must have a service name`, bindingName) + } + + var params []map[string]interface{} + for _, param := range b.Outbound.Params { + params = append(params, map[string]interface{}{ + "name": param.Name, + }) + } + + meta["outbound"] = map[string]interface{}{ + "worker": map[string]interface{}{ + "service": b.Outbound.Worker.Service, + "environment": b.Outbound.Worker.Environment, + }, + "params": params, + } + } + + return meta, nil, nil +} + +// WorkerD1DatabaseBinding is a binding to a D1 instance. +type WorkerD1DatabaseBinding struct { + DatabaseID string +} + +// Type returns the type of the binding. +func (b WorkerD1DatabaseBinding) Type() WorkerBindingType { + return WorkerD1DataseBindingType +} + +func (b WorkerD1DatabaseBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.DatabaseID == "" { + return nil, nil, fmt.Errorf(`database ID for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": bindingName, + "type": b.Type(), + "id": b.DatabaseID, + }, nil, nil +} + +// WorkerHyperdriveBinding is a binding to a Hyperdrive config. +type WorkerHyperdriveBinding struct { + Binding string + ConfigID string +} + +// Type returns the type of the binding. 
+func (b WorkerHyperdriveBinding) Type() WorkerBindingType { + return WorkerHyperdriveBindingType +} + +func (b WorkerHyperdriveBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + if b.Binding == "" { + return nil, nil, fmt.Errorf(`binding name for binding "%s" cannot be empty`, bindingName) + } + if b.ConfigID == "" { + return nil, nil, fmt.Errorf(`config ID for binding "%s" cannot be empty`, bindingName) + } + + return workerBindingMeta{ + "name": b.Binding, + "type": b.Type(), + "id": b.ConfigID, + }, nil, nil +} + +// UnsafeBinding is for experimental or deprecated bindings, and allows specifying any binding type or property. +type UnsafeBinding map[string]interface{} + +// Type returns the type of the binding. +func (b UnsafeBinding) Type() WorkerBindingType { + return "" +} + +func (b UnsafeBinding) serialize(bindingName string) (workerBindingMeta, workerBindingBodyWriter, error) { + b["name"] = bindingName + return b, nil, nil +} + +// Each binding that adds a part to the multipart form body will need +// a unique part name so we just generate a random 128bit hex string. +func getRandomPartName() string { + randBytes := make([]byte, 16) + rand.Read(randBytes) //nolint:errcheck + return hex.EncodeToString(randBytes) +} + +// ListWorkerBindings returns all the bindings for a particular worker. +func (api *API) ListWorkerBindings(ctx context.Context, rc *ResourceContainer, params ListWorkerBindingsParams) (WorkerBindingListResponse, error) { + if params.ScriptName == "" { + return WorkerBindingListResponse{}, errors.New("script name is required") + } + + if rc.Level != AccountRouteLevel { + return WorkerBindingListResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkerBindingListResponse{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/bindings", rc.Identifier, params.ScriptName) + if params.DispatchNamespace != nil && *params.DispatchNamespace != "" { + uri = fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces/%s/scripts/%s/bindings", rc.Identifier, *params.DispatchNamespace, params.ScriptName) + } + + var jsonRes struct { + Response + Bindings []workerBindingMeta `json:"result"` + } + var r WorkerBindingListResponse + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return r, err + } + err = json.Unmarshal(res, &jsonRes) + if err != nil { + return r, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + r = WorkerBindingListResponse{ + Response: jsonRes.Response, + BindingList: make([]WorkerBindingListItem, 0, len(jsonRes.Bindings)), + } + for _, jsonBinding := range jsonRes.Bindings { + name, ok := jsonBinding["name"].(string) + if !ok { + return r, fmt.Errorf("binding missing name %v", jsonBinding) + } + bType, ok := jsonBinding["type"].(string) + if !ok { + return r, fmt.Errorf("binding missing type %v", jsonBinding) + } + bindingListItem := WorkerBindingListItem{ + Name: name, + } + + switch WorkerBindingType(bType) { + case WorkerDurableObjectBindingType: + class_name := jsonBinding["class_name"].(string) + script_name := jsonBinding["script_name"].(string) + bindingListItem.Binding = WorkerDurableObjectBinding{ + ClassName: class_name, + ScriptName: script_name, + } + case WorkerKvNamespaceBindingType: + namespaceID := jsonBinding["namespace_id"].(string) + bindingListItem.Binding = WorkerKvNamespaceBinding{ + NamespaceID: namespaceID, + } + case WorkerQueueBindingType: + queueName := jsonBinding["queue_name"].(string) 
+ bindingListItem.Binding = WorkerQueueBinding{ + Binding: name, + Queue: queueName, + } + case WorkerWebAssemblyBindingType: + bindingListItem.Binding = WorkerWebAssemblyBinding{ + Module: &bindingContentReader{ + api: api, + ctx: ctx, + accountID: rc.Identifier, + params: ¶ms, + bindingName: name, + }, + } + case WorkerPlainTextBindingType: + text := jsonBinding["text"].(string) + bindingListItem.Binding = WorkerPlainTextBinding{ + Text: text, + } + case WorkerServiceBindingType: + service := jsonBinding["service"].(string) + environment := jsonBinding["environment"].(string) + bindingListItem.Binding = WorkerServiceBinding{ + Service: service, + Environment: &environment, + } + case WorkerSecretTextBindingType: + bindingListItem.Binding = WorkerSecretTextBinding{} + case WorkerR2BucketBindingType: + bucketName := jsonBinding["bucket_name"].(string) + bindingListItem.Binding = WorkerR2BucketBinding{ + BucketName: bucketName, + } + case WorkerAnalyticsEngineBindingType: + dataset := jsonBinding["dataset"].(string) + bindingListItem.Binding = WorkerAnalyticsEngineBinding{ + Dataset: dataset, + } + case WorkerD1DataseBindingType: + database_id := jsonBinding["database_id"].(string) + bindingListItem.Binding = WorkerD1DatabaseBinding{ + DatabaseID: database_id, + } + case WorkerHyperdriveBindingType: + id := jsonBinding["id"].(string) + bindingListItem.Binding = WorkerHyperdriveBinding{ + Binding: name, + ConfigID: id, + } + default: + bindingListItem.Binding = WorkerInheritBinding{} + } + r.BindingList = append(r.BindingList, bindingListItem) + } + + return r, nil +} + +// bindingContentReader is an io.Reader that will lazily load the +// raw bytes for a binding from the API when the Read() method +// is first called. This is only useful for binding types +// that store raw bytes, like WebAssembly modules. +type bindingContentReader struct { + api *API + accountID string + params *ListWorkerBindingsParams + ctx context.Context + bindingName string + content []byte + position int +} + +func (b *bindingContentReader) Read(p []byte) (n int, err error) { + // Lazily load the content when Read() is first called + if b.content == nil { + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/bindings/%s/content", b.accountID, b.params.ScriptName, b.bindingName) + res, err := b.api.makeRequestContext(b.ctx, http.MethodGet, uri, nil) + if err != nil { + return 0, err + } + b.content = res + } + + if b.position >= len(b.content) { + return 0, io.EOF + } + + bytesRemaining := len(b.content) - b.position + bytesToProcess := 0 + if len(p) < bytesRemaining { + bytesToProcess = len(p) + } else { + bytesToProcess = bytesRemaining + } + + for i := 0; i < bytesToProcess; i++ { + p[i] = b.content[b.position] + b.position = b.position + 1 + } + + return bytesToProcess, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_cron_triggers.go b/vendor/github.com/cloudflare/cloudflare-go/workers_cron_triggers.go new file mode 100644 index 0000000..c231c99 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_cron_triggers.go @@ -0,0 +1,91 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// WorkerCronTriggerResponse represents the response from the Worker cron trigger +// API endpoint. +type WorkerCronTriggerResponse struct { + Response + Result WorkerCronTriggerSchedules `json:"result"` +} + +// WorkerCronTriggerSchedules contains the schedule of Worker cron triggers. 
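+// For example, a single trigger serializes as (illustrative):
+//
+//	{"schedules": [{"cron": "*/30 * * * *"}]}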
+type WorkerCronTriggerSchedules struct { + Schedules []WorkerCronTrigger `json:"schedules"` +} + +// WorkerCronTrigger holds an individual cron schedule for a worker. +type WorkerCronTrigger struct { + Cron string `json:"cron"` + CreatedOn *time.Time `json:"created_on,omitempty"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` +} + +type ListWorkerCronTriggersParams struct { + ScriptName string +} + +type UpdateWorkerCronTriggersParams struct { + ScriptName string + Crons []WorkerCronTrigger +} + +// ListWorkerCronTriggers fetches all available cron triggers for a single Worker +// script. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-cron-trigger-get-cron-triggers +func (api *API) ListWorkerCronTriggers(ctx context.Context, rc *ResourceContainer, params ListWorkerCronTriggersParams) ([]WorkerCronTrigger, error) { + if rc.Level != AccountRouteLevel { + return []WorkerCronTrigger{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return []WorkerCronTrigger{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/schedules", rc.Identifier, params.ScriptName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WorkerCronTrigger{}, err + } + + result := WorkerCronTriggerResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []WorkerCronTrigger{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.Schedules, err +} + +// UpdateWorkerCronTriggers updates a single schedule for a Worker cron trigger. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-cron-trigger-update-cron-triggers +func (api *API) UpdateWorkerCronTriggers(ctx context.Context, rc *ResourceContainer, params UpdateWorkerCronTriggersParams) ([]WorkerCronTrigger, error) { + if rc.Level != AccountRouteLevel { + return []WorkerCronTrigger{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return []WorkerCronTrigger{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/schedules", rc.Identifier, params.ScriptName) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Crons) + if err != nil { + return []WorkerCronTrigger{}, err + } + + result := WorkerCronTriggerResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return []WorkerCronTrigger{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result.Result.Schedules, err +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_domain.go b/vendor/github.com/cloudflare/cloudflare-go/workers_domain.go new file mode 100644 index 0000000..a709542 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_domain.go @@ -0,0 +1,151 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingHostname = errors.New("required hostname missing") + ErrMissingService = errors.New("required service missing") + ErrMissingEnvironment = errors.New("required environment missing") +) + +type AttachWorkersDomainParams struct { + ID string `json:"id,omitempty"` + ZoneID string `json:"zone_id,omitempty"` + ZoneName string `json:"zone_name,omitempty"` + Hostname string `json:"hostname,omitempty"` + Service string `json:"service,omitempty"` + Environment string `json:"environment,omitempty"` +} + +type WorkersDomain struct { + ID string `json:"id,omitempty"` + ZoneID string `json:"zone_id,omitempty"` + ZoneName 
string `json:"zone_name,omitempty"` + Hostname string `json:"hostname,omitempty"` + Service string `json:"service,omitempty"` + Environment string `json:"environment,omitempty"` +} + +type WorkersDomainResponse struct { + Response + Result WorkersDomain `json:"result"` +} + +type ListWorkersDomainParams struct { + ZoneID string `url:"zone_id,omitempty"` + ZoneName string `url:"zone_name,omitempty"` + Hostname string `url:"hostname,omitempty"` + Service string `url:"service,omitempty"` + Environment string `url:"environment,omitempty"` +} + +type WorkersDomainListResponse struct { + Response + Result []WorkersDomain `json:"result"` +} + +// ListWorkersDomains lists all Worker Domains. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-domain-list-domains +func (api *API) ListWorkersDomains(ctx context.Context, rc *ResourceContainer, params ListWorkersDomainParams) ([]WorkersDomain, error) { + if rc.Identifier == "" { + return []WorkersDomain{}, ErrMissingAccountID + } + + uri := buildURI(fmt.Sprintf("/accounts/%s/workers/domains", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WorkersDomain{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r WorkersDomainListResponse + if err := json.Unmarshal(res, &r); err != nil { + return []WorkersDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// AttachWorkersDomain attaches a worker to a zone and hostname. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-domain-attach-to-domain +func (api *API) AttachWorkersDomain(ctx context.Context, rc *ResourceContainer, domain AttachWorkersDomainParams) (WorkersDomain, error) { + if rc.Identifier == "" { + return WorkersDomain{}, ErrMissingAccountID + } + + if domain.ZoneID == "" { + return WorkersDomain{}, ErrMissingZoneID + } + + if domain.Hostname == "" { + return WorkersDomain{}, ErrMissingHostname + } + + if domain.Service == "" { + return WorkersDomain{}, ErrMissingService + } + + if domain.Environment == "" { + return WorkersDomain{}, ErrMissingEnvironment + } + + uri := fmt.Sprintf("/accounts/%s/workers/domains", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, domain) + if err != nil { + return WorkersDomain{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r WorkersDomainResponse + if err := json.Unmarshal(res, &r); err != nil { + return WorkersDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// GetWorkersDomain gets a single Worker Domain. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-domain-get-a-domain +func (api *API) GetWorkersDomain(ctx context.Context, rc *ResourceContainer, domainID string) (WorkersDomain, error) { + if rc.Identifier == "" { + return WorkersDomain{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/domains/%s", rc.Identifier, domainID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WorkersDomain{}, fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + var r WorkersDomainResponse + if err := json.Unmarshal(res, &r); err != nil { + return WorkersDomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// DetachWorkersDomain detaches a worker from a zone and hostname. 
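+// Illustrative usage (a sketch; domain.ID comes from a prior
+// AttachWorkersDomain or ListWorkersDomains call):
+//
+//	err := api.DetachWorkersDomain(ctx, AccountIdentifier(accountID), domain.ID)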
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-domain-detach-from-domain +func (api *API) DetachWorkersDomain(ctx context.Context, rc *ResourceContainer, domainID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/domains/%s", rc.Identifier, domainID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return fmt.Errorf("%s: %w", errMakeRequestError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_for_platforms.go b/vendor/github.com/cloudflare/cloudflare-go/workers_for_platforms.go new file mode 100644 index 0000000..eda77ea --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_for_platforms.go @@ -0,0 +1,139 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type WorkersForPlatformsDispatchNamespace struct { + NamespaceId string `json:"namespace_id"` + NamespaceName string `json:"namespace_name"` + CreatedOn *time.Time `json:"created_on,omitempty"` + CreatedBy string `json:"created_by"` + ModifiedOn *time.Time `json:"modified_on,omitempty"` + ModifiedBy string `json:"modified_by"` +} + +type ListWorkersForPlatformsDispatchNamespaceResponse struct { + Response + Result []WorkersForPlatformsDispatchNamespace `json:"result"` +} + +type GetWorkersForPlatformsDispatchNamespaceResponse struct { + Response + Result WorkersForPlatformsDispatchNamespace `json:"result"` +} + +type CreateWorkersForPlatformsDispatchNamespaceParams struct { + Name string `json:"name"` +} + +// ListWorkersForPlatformsDispatchNamespaces lists the dispatch namespaces. +// +// API reference: https://developers.cloudflare.com/api/operations/namespace-worker-list +func (api *API) ListWorkersForPlatformsDispatchNamespaces(ctx context.Context, rc *ResourceContainer) (*ListWorkersForPlatformsDispatchNamespaceResponse, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return nil, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + var r ListWorkersForPlatformsDispatchNamespaceResponse + if err != nil { + return nil, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &r, nil +} + +// GetWorkersForPlatformsDispatchNamespace gets a specific dispatch namespace. +// +// API reference: https://developers.cloudflare.com/api/operations/namespace-worker-get-namespace +func (api *API) GetWorkersForPlatformsDispatchNamespace(ctx context.Context, rc *ResourceContainer, name string) (*GetWorkersForPlatformsDispatchNamespaceResponse, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return nil, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces/%s", rc.Identifier, name) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + + var r GetWorkersForPlatformsDispatchNamespaceResponse + if err != nil { + return nil, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &r, nil +} + +// CreateWorkersForPlatformsDispatchNamespace creates a new dispatch namespace. 
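+// Illustrative usage (a sketch; the namespace name is a hypothetical placeholder):
+//
+//	ns, err := api.CreateWorkersForPlatformsDispatchNamespace(ctx, AccountIdentifier(accountID),
+//		CreateWorkersForPlatformsDispatchNamespaceParams{Name: "my-customers"})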
+// +// API reference: https://developers.cloudflare.com/api/operations/namespace-worker-create +func (api *API) CreateWorkersForPlatformsDispatchNamespace(ctx context.Context, rc *ResourceContainer, params CreateWorkersForPlatformsDispatchNamespaceParams) (*GetWorkersForPlatformsDispatchNamespaceResponse, error) { + if rc.Level != AccountRouteLevel { + return nil, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return nil, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + + var r GetWorkersForPlatformsDispatchNamespaceResponse + if err != nil { + return nil, err + } + + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return &r, nil +} + +// DeleteWorkersForPlatformsDispatchNamespace deletes a dispatch namespace. +// +// API reference: https://developers.cloudflare.com/api/operations/namespace-worker-delete-namespace +func (api *API) DeleteWorkersForPlatformsDispatchNamespace(ctx context.Context, rc *ResourceContainer, name string) error { + if rc.Level != AccountRouteLevel { + return ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/dispatch/namespaces/%s", rc.Identifier, name) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_kv.go b/vendor/github.com/cloudflare/cloudflare-go/workers_kv.go new file mode 100644 index 0000000..d7e1f04 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_kv.go @@ -0,0 +1,378 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/goccy/go-json" +) + +// CreateWorkersKVNamespaceParams provides parameters for creating and updating storage namespaces. +type CreateWorkersKVNamespaceParams struct { + Title string `json:"title"` +} + +type UpdateWorkersKVNamespaceParams struct { + NamespaceID string `json:"-"` + Title string `json:"title"` +} + +// WorkersKVPair is used in an array in the request to the bulk KV api. +type WorkersKVPair struct { + Key string `json:"key"` + Value string `json:"value"` + Expiration int `json:"expiration,omitempty"` + ExpirationTTL int `json:"expiration_ttl,omitempty"` + Metadata interface{} `json:"metadata,omitempty"` + Base64 bool `json:"base64,omitempty"` +} + +// WorkersKVNamespaceResponse is the response received when creating storage namespaces. +type WorkersKVNamespaceResponse struct { + Response + Result WorkersKVNamespace `json:"result"` +} + +// WorkersKVNamespace contains the unique identifier and title of a storage namespace. +type WorkersKVNamespace struct { + ID string `json:"id"` + Title string `json:"title"` +} + +// ListWorkersKVNamespacesResponse contains a slice of storage namespaces associated with an +// account, pagination information, and an embedded response struct. +type ListWorkersKVNamespacesResponse struct { + Response + Result []WorkersKVNamespace `json:"result"` + ResultInfo `json:"result_info"` +} + +// StorageKey is a key name used to identify a storage value. 
+type StorageKey struct { + Name string `json:"name"` + Expiration int `json:"expiration"` + Metadata interface{} `json:"metadata"` +} + +// ListStorageKeysResponse contains a slice of keys belonging to a storage namespace, +// pagination information, and an embedded response struct. +type ListStorageKeysResponse struct { + Response + Result []StorageKey `json:"result"` + ResultInfo `json:"result_info"` +} + +type ListWorkersKVNamespacesParams struct { + ResultInfo +} + +type WriteWorkersKVEntryParams struct { + NamespaceID string + Key string + Value []byte +} + +type WriteWorkersKVEntriesParams struct { + NamespaceID string + KVs []*WorkersKVPair +} + +type GetWorkersKVParams struct { + NamespaceID string + Key string +} + +type DeleteWorkersKVEntryParams struct { + NamespaceID string + Key string +} + +type DeleteWorkersKVEntriesParams struct { + NamespaceID string + Keys []string +} + +type ListWorkersKVsParams struct { + NamespaceID string `url:"-"` + Limit int `url:"limit,omitempty"` + Cursor string `url:"cursor,omitempty"` + Prefix string `url:"prefix,omitempty"` +} + +// CreateWorkersKVNamespace creates a namespace under the given title. +// A 400 is returned if the account already owns a namespace with this title. +// A namespace must be explicitly deleted to be replaced. +// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-create-a-namespace +func (api *API) CreateWorkersKVNamespace(ctx context.Context, rc *ResourceContainer, params CreateWorkersKVNamespaceParams) (WorkersKVNamespaceResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkersKVNamespaceResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkersKVNamespaceResponse{}, ErrMissingIdentifier + } + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return WorkersKVNamespaceResponse{}, err + } + + result := WorkersKVNamespaceResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// ListWorkersKVNamespaces lists storage namespaces. 
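+// Illustrative usage (a sketch; with zero-value params every page is fetched
+// automatically):
+//
+//	namespaces, _, err := api.ListWorkersKVNamespaces(ctx, AccountIdentifier(accountID), ListWorkersKVNamespacesParams{})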
+// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-list-namespaces +func (api *API) ListWorkersKVNamespaces(ctx context.Context, rc *ResourceContainer, params ListWorkersKVNamespacesParams) ([]WorkersKVNamespace, *ResultInfo, error) { + if rc.Level != AccountRouteLevel { + return []WorkersKVNamespace{}, &ResultInfo{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return []WorkersKVNamespace{}, &ResultInfo{}, ErrMissingIdentifier + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + if params.PerPage < 1 { + params.PerPage = 50 + } + if params.Page < 1 { + params.Page = 1 + } + + var namespaces []WorkersKVNamespace + var nsResponse ListWorkersKVNamespacesResponse + for { + nsResponse = ListWorkersKVNamespacesResponse{} + uri := buildURI(fmt.Sprintf("/accounts/%s/storage/kv/namespaces", rc.Identifier), params) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []WorkersKVNamespace{}, &ResultInfo{}, err + } + + err = json.Unmarshal(res, &nsResponse) + if err != nil { + return []WorkersKVNamespace{}, &ResultInfo{}, fmt.Errorf("failed to unmarshal workers KV namespaces JSON data: %w", err) + } + + namespaces = append(namespaces, nsResponse.Result...) + params.ResultInfo = nsResponse.ResultInfo.Next() + + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + + return namespaces, &nsResponse.ResultInfo, nil +} + +// DeleteWorkersKVNamespace deletes the namespace corresponding to the given ID. +// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-remove-a-namespace +func (api *API) DeleteWorkersKVNamespace(ctx context.Context, rc *ResourceContainer, namespaceID string) (Response, error) { + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s", rc.Identifier, namespaceID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// UpdateWorkersKVNamespace modifies a KV namespace based on the ID. +// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-rename-a-namespace +func (api *API) UpdateWorkersKVNamespace(ctx context.Context, rc *ResourceContainer, params UpdateWorkersKVNamespaceParams) (Response, error) { + if rc.Level != AccountRouteLevel { + return Response{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return Response{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s", rc.Identifier, params.NamespaceID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// WriteWorkersKVEntry writes a single KV value based on the key. 
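+// Illustrative usage (a sketch; namespaceID, key, and value are hypothetical
+// placeholders):
+//
+//	_, err := api.WriteWorkersKVEntry(ctx, AccountIdentifier(accountID), WriteWorkersKVEntryParams{
+//		NamespaceID: namespaceID,
+//		Key:         "greeting",
+//		Value:       []byte("hello"),
+//	})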
+// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-write-key-value-pair-with-metadata +func (api *API) WriteWorkersKVEntry(ctx context.Context, rc *ResourceContainer, params WriteWorkersKVEntryParams) (Response, error) { + if rc.Level != AccountRouteLevel { + return Response{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return Response{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s/values/%s", rc.Identifier, params.NamespaceID, url.PathEscape(params.Key)) + res, err := api.makeRequestContextWithHeaders( + ctx, http.MethodPut, uri, params.Value, http.Header{"Content-Type": []string{"application/octet-stream"}}, + ) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// WriteWorkersKVEntries writes multiple KVs at once. +// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-write-multiple-key-value-pairs +func (api *API) WriteWorkersKVEntries(ctx context.Context, rc *ResourceContainer, params WriteWorkersKVEntriesParams) (Response, error) { + if rc.Level != AccountRouteLevel { + return Response{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return Response{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s/bulk", rc.Identifier, params.NamespaceID) + res, err := api.makeRequestContextWithHeaders( + ctx, http.MethodPut, uri, params.KVs, http.Header{"Content-Type": []string{"application/json"}}, + ) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// GetWorkersKV returns the value associated with the given key in the +// given namespace. +// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-read-key-value-pair +func (api API) GetWorkersKV(ctx context.Context, rc *ResourceContainer, params GetWorkersKVParams) ([]byte, error) { + if rc.Level != AccountRouteLevel { + return []byte(``), ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return []byte(``), ErrMissingIdentifier + } + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s/values/%s", rc.Identifier, params.NamespaceID, url.PathEscape(params.Key)) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + return res, nil +} + +// DeleteWorkersKVEntry deletes a key and value for a provided storage namespace. 
+// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-delete-key-value-pair +func (api API) DeleteWorkersKVEntry(ctx context.Context, rc *ResourceContainer, params DeleteWorkersKVEntryParams) (Response, error) { + if rc.Level != AccountRouteLevel { + return Response{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return Response{}, ErrMissingIdentifier + } + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s/values/%s", rc.Identifier, params.NamespaceID, url.PathEscape(params.Key)) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return result, err +} + +// DeleteWorkersKVEntries deletes multiple KVs at once. +// +// API reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-delete-multiple-key-value-pairs +func (api *API) DeleteWorkersKVEntries(ctx context.Context, rc *ResourceContainer, params DeleteWorkersKVEntriesParams) (Response, error) { + if rc.Level != AccountRouteLevel { + return Response{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return Response{}, ErrMissingIdentifier + } + uri := fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s/bulk", rc.Identifier, params.NamespaceID) + res, err := api.makeRequestContextWithHeaders( + ctx, http.MethodDelete, uri, params.Keys, http.Header{"Content-Type": []string{"application/json"}}, + ) + if err != nil { + return Response{}, err + } + + result := Response{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// ListWorkersKVKeys lists a namespace's keys. 
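+// Illustrative usage (a sketch; Prefix and Limit are optional filters):
+//
+//	keys, err := api.ListWorkersKVKeys(ctx, AccountIdentifier(accountID), ListWorkersKVsParams{
+//		NamespaceID: namespaceID,
+//		Prefix:      "greet",
+//	})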
+// +// API Reference: https://developers.cloudflare.com/api/operations/workers-kv-namespace-list-a-namespace'-s-keys +func (api API) ListWorkersKVKeys(ctx context.Context, rc *ResourceContainer, params ListWorkersKVsParams) (ListStorageKeysResponse, error) { + if rc.Level != AccountRouteLevel { + return ListStorageKeysResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return ListStorageKeysResponse{}, ErrMissingIdentifier + } + + uri := buildURI( + fmt.Sprintf("/accounts/%s/storage/kv/namespaces/%s/keys", rc.Identifier, params.NamespaceID), + params, + ) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ListStorageKeysResponse{}, err + } + + result := ListStorageKeysResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return result, err +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_routes.go b/vendor/github.com/cloudflare/cloudflare-go/workers_routes.go new file mode 100644 index 0000000..9fc7f7a --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_routes.go @@ -0,0 +1,162 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +var ErrMissingWorkerRouteID = errors.New("missing required route ID") + +type ListWorkerRoutes struct{} + +type CreateWorkerRouteParams struct { + Pattern string `json:"pattern"` + Script string `json:"script,omitempty"` +} + +type ListWorkerRoutesParams struct{} + +type UpdateWorkerRouteParams struct { + ID string `json:"id,omitempty"` + Pattern string `json:"pattern"` + Script string `json:"script,omitempty"` +} + +// CreateWorkerRoute creates worker route for a script. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-routes-create-route +func (api *API) CreateWorkerRoute(ctx context.Context, rc *ResourceContainer, params CreateWorkerRouteParams) (WorkerRouteResponse, error) { + if rc.Level != ZoneRouteLevel { + return WorkerRouteResponse{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if rc.Identifier == "" { + return WorkerRouteResponse{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/zones/%s/workers/routes", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return WorkerRouteResponse{}, err + } + + var r WorkerRouteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WorkerRouteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// DeleteWorkerRoute deletes worker route for a script. 
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-routes-delete-route +func (api *API) DeleteWorkerRoute(ctx context.Context, rc *ResourceContainer, routeID string) (WorkerRouteResponse, error) { + if rc.Level != ZoneRouteLevel { + return WorkerRouteResponse{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if rc.Identifier == "" { + return WorkerRouteResponse{}, ErrMissingIdentifier + } + + if routeID == "" { + return WorkerRouteResponse{}, errors.New("missing required route ID") + } + + uri := fmt.Sprintf("/zones/%s/workers/routes/%s", rc.Identifier, routeID) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return WorkerRouteResponse{}, err + } + var r WorkerRouteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WorkerRouteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// ListWorkerRoutes returns list of Worker routes. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-routes-list-routes +func (api *API) ListWorkerRoutes(ctx context.Context, rc *ResourceContainer, params ListWorkerRoutesParams) (WorkerRoutesResponse, error) { + if rc.Level != ZoneRouteLevel { + return WorkerRoutesResponse{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if rc.Identifier == "" { + return WorkerRoutesResponse{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/zones/%s/workers/routes", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WorkerRoutesResponse{}, err + } + var r WorkerRoutesResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WorkerRoutesResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r, nil +} + +// GetWorkerRoute returns a Workers route. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-routes-get-route +func (api *API) GetWorkerRoute(ctx context.Context, rc *ResourceContainer, routeID string) (WorkerRouteResponse, error) { + if rc.Level != ZoneRouteLevel { + return WorkerRouteResponse{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if rc.Identifier == "" { + return WorkerRouteResponse{}, ErrMissingIdentifier + } + + uri := fmt.Sprintf("/zones/%s/workers/routes/%s", rc.Identifier, routeID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WorkerRouteResponse{}, err + } + var r WorkerRouteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WorkerRouteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// UpdateWorkerRoute updates worker route for a script. 
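+//
+// Illustrative sketch (not upstream code); the zone ID, route ID, pattern and
+// script name are placeholders:
+//
+//	resp, err := api.UpdateWorkerRoute(ctx, cloudflare.ZoneIdentifier("<zone-id>"),
+//		cloudflare.UpdateWorkerRouteParams{ID: "<route-id>", Pattern: "example.com/*", Script: "my-worker"})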
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-routes-update-route +func (api *API) UpdateWorkerRoute(ctx context.Context, rc *ResourceContainer, params UpdateWorkerRouteParams) (WorkerRouteResponse, error) { + if rc.Level != ZoneRouteLevel { + return WorkerRouteResponse{}, fmt.Errorf(errInvalidResourceContainerAccess, ZoneRouteLevel) + } + + if rc.Identifier == "" { + return WorkerRouteResponse{}, ErrMissingIdentifier + } + + if params.ID == "" { + return WorkerRouteResponse{}, ErrMissingWorkerRouteID + } + + uri := fmt.Sprintf("/zones/%s/workers/routes/%s", rc.Identifier, params.ID) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return WorkerRouteResponse{}, err + } + var r WorkerRouteResponse + err = json.Unmarshal(res, &r) + if err != nil { + return WorkerRouteResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_secrets.go b/vendor/github.com/cloudflare/cloudflare-go/workers_secrets.go new file mode 100644 index 0000000..cfcd597 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_secrets.go @@ -0,0 +1,125 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + + "github.com/goccy/go-json" +) + +// WorkersPutSecretRequest provides parameters for creating and updating secrets. +type WorkersPutSecretRequest struct { + Name string `json:"name"` + Text string `json:"text"` + Type WorkerBindingType `json:"type"` +} + +// WorkersSecret contains the name and type of the secret. +type WorkersSecret struct { + Name string `json:"name"` + Type string `json:"secret_text"` +} + +// WorkersPutSecretResponse is the response received when creating or updating a secret. +type WorkersPutSecretResponse struct { + Response + Result WorkersSecret `json:"result"` +} + +// WorkersListSecretsResponse is the response received when listing secrets. +type WorkersListSecretsResponse struct { + Response + Result []WorkersSecret `json:"result"` +} + +type SetWorkersSecretParams struct { + ScriptName string + Secret *WorkersPutSecretRequest +} + +type DeleteWorkersSecretParams struct { + ScriptName string + SecretName string +} + +type ListWorkersSecretsParams struct { + ScriptName string +} + +// SetWorkersSecret creates or updates a secret. +// +// API reference: https://api.cloudflare.com/ +func (api *API) SetWorkersSecret(ctx context.Context, rc *ResourceContainer, params SetWorkersSecretParams) (WorkersPutSecretResponse, error) { + if rc.Level != AccountRouteLevel { + return WorkersPutSecretResponse{}, ErrRequiredAccountLevelResourceContainer + } + + if rc.Identifier == "" { + return WorkersPutSecretResponse{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/secrets", rc.Identifier, params.ScriptName) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Secret) + if err != nil { + return WorkersPutSecretResponse{}, err + } + + result := WorkersPutSecretResponse{} + if err := json.Unmarshal(res, &result); err != nil { + return result, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return result, err +} + +// DeleteWorkersSecret deletes a secret. 
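+//
+// Illustrative sketch (not upstream code); the account ID, script name and
+// secret name are placeholders:
+//
+//	resp, err := api.DeleteWorkersSecret(ctx, cloudflare.AccountIdentifier("<account-id>"),
+//		cloudflare.DeleteWorkersSecretParams{ScriptName: "my-worker", SecretName: "MY_SECRET"})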
+//
+// API reference: https://api.cloudflare.com/
+func (api *API) DeleteWorkersSecret(ctx context.Context, rc *ResourceContainer, params DeleteWorkersSecretParams) (Response, error) {
+	if rc.Level != AccountRouteLevel {
+		return Response{}, ErrRequiredAccountLevelResourceContainer
+	}
+
+	if rc.Identifier == "" {
+		return Response{}, ErrMissingAccountID
+	}
+
+	uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/secrets/%s", rc.Identifier, params.ScriptName, params.SecretName)
+	res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil)
+	if err != nil {
+		return Response{}, err
+	}
+
+	result := Response{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return result, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return result, err
+}
+
+// ListWorkersSecrets lists secrets for a given worker.
+// API reference: https://api.cloudflare.com/
+func (api *API) ListWorkersSecrets(ctx context.Context, rc *ResourceContainer, params ListWorkersSecretsParams) (WorkersListSecretsResponse, error) {
+	if rc.Level != AccountRouteLevel {
+		return WorkersListSecretsResponse{}, ErrRequiredAccountLevelResourceContainer
+	}
+
+	if rc.Identifier == "" {
+		return WorkersListSecretsResponse{}, ErrMissingAccountID
+	}
+
+	uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/secrets", rc.Identifier, params.ScriptName)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return WorkersListSecretsResponse{}, err
+	}
+
+	result := WorkersListSecretsResponse{}
+	if err := json.Unmarshal(res, &result); err != nil {
+		return result, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return result, err
+}
diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_subdomain.go b/vendor/github.com/cloudflare/cloudflare-go/workers_subdomain.go
new file mode 100644
index 0000000..4fc2e7c
--- /dev/null
+++ b/vendor/github.com/cloudflare/cloudflare-go/workers_subdomain.go
@@ -0,0 +1,58 @@
+package cloudflare
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/goccy/go-json"
+)
+
+type WorkersSubdomain struct {
+	Name string `json:"name,omitempty"`
+}
+
+type WorkersSubdomainResponse struct {
+	Response
+	Result WorkersSubdomain
+}
+
+// WorkersCreateSubdomain creates a Workers subdomain for an account.
+//
+// API reference: https://developers.cloudflare.com/api/operations/worker-subdomain-create-subdomain
+func (api *API) WorkersCreateSubdomain(ctx context.Context, rc *ResourceContainer, params WorkersSubdomain) (WorkersSubdomain, error) {
+	if rc.Identifier == "" {
+		return WorkersSubdomain{}, ErrMissingAccountID
+	}
+
+	uri := fmt.Sprintf("/accounts/%s/workers/subdomain", rc.Identifier)
+	res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params)
+	if err != nil {
+		return WorkersSubdomain{}, err
+	}
+	var r WorkersSubdomainResponse
+	if err := json.Unmarshal(res, &r); err != nil {
+		return WorkersSubdomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// WorkersGetSubdomain returns the Workers subdomain for an account.
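+//
+// Illustrative sketch (not upstream code); the account ID is a placeholder and
+// cloudflare.AccountIdentifier is assumed to be the package's resource helper:
+//
+//	sub, err := api.WorkersGetSubdomain(ctx, cloudflare.AccountIdentifier("<account-id>"))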
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-subdomain-get-subdomain +func (api *API) WorkersGetSubdomain(ctx context.Context, rc *ResourceContainer) (WorkersSubdomain, error) { + if rc.Identifier == "" { + return WorkersSubdomain{}, ErrMissingAccountID + } + + uri := fmt.Sprintf("/accounts/%s/workers/subdomain", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WorkersSubdomain{}, err + } + var r WorkersSubdomainResponse + if err := json.Unmarshal(res, &r); err != nil { + return WorkersSubdomain{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/workers_tail.go b/vendor/github.com/cloudflare/cloudflare-go/workers_tail.go new file mode 100644 index 0000000..733bc98 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/workers_tail.go @@ -0,0 +1,114 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingScriptName = errors.New("required script name missing") + ErrMissingTailID = errors.New("required tail id missing") +) + +type WorkersTail struct { + ID string `json:"id"` + URL string `json:"url"` + ExpiresAt *time.Time `json:"expires_at"` +} + +type StartWorkersTailResponse struct { + Response + Result WorkersTail +} + +type ListWorkersTailParameters struct { + AccountID string + ScriptName string +} + +type ListWorkersTailResponse struct { + Response + Result WorkersTail +} + +// StartWorkersTail Starts a tail that receives logs and exception from a Worker. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-tail-logs-start-tail +func (api *API) StartWorkersTail(ctx context.Context, rc *ResourceContainer, scriptName string) (WorkersTail, error) { + if rc.Identifier == "" { + return WorkersTail{}, ErrMissingAccountID + } + + if scriptName == "" { + return WorkersTail{}, ErrMissingScriptName + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/tails", rc.Identifier, scriptName) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return WorkersTail{}, err + } + + var workerstailResponse StartWorkersTailResponse + if err := json.Unmarshal(res, &workerstailResponse); err != nil { + return WorkersTail{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return workerstailResponse.Result, nil +} + +// ListWorkersTail Get list of tails currently deployed on a Worker. +// +// API reference: https://developers.cloudflare.com/api/operations/worker-tail-logs-list-tails +func (api *API) ListWorkersTail(ctx context.Context, rc *ResourceContainer, params ListWorkersTailParameters) (WorkersTail, error) { + if rc.Identifier == "" { + return WorkersTail{}, ErrMissingAccountID + } + + if params.ScriptName == "" { + return WorkersTail{}, ErrMissingScriptName + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/tails", rc.Identifier, params.ScriptName) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return WorkersTail{}, err + } + + var workerstailResponse ListWorkersTailResponse + if err := json.Unmarshal(res, &workerstailResponse); err != nil { + return WorkersTail{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return workerstailResponse.Result, nil +} + +// DeleteWorkersTail Deletes a tail from a Worker. 
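+//
+// Illustrative sketch (not upstream code); the account ID, script name and
+// tail ID are placeholders:
+//
+//	err := api.DeleteWorkersTail(ctx, cloudflare.AccountIdentifier("<account-id>"), "my-worker", "<tail-id>")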
+// +// API reference: https://developers.cloudflare.com/api/operations/worker-tail-logs-delete-tail +func (api *API) DeleteWorkersTail(ctx context.Context, rc *ResourceContainer, scriptName, tailID string) error { + if rc.Identifier == "" { + return ErrMissingAccountID + } + + if scriptName == "" { + return ErrMissingScriptName + } + + if tailID == "" { + return ErrMissingTailID + } + + uri := fmt.Sprintf("/accounts/%s/workers/scripts/%s/tails/%s", rc.Identifier, scriptName, tailID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/zaraz.go b/vendor/github.com/cloudflare/cloudflare-go/zaraz.go new file mode 100644 index 0000000..3ffc4f5 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/zaraz.go @@ -0,0 +1,430 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type ZarazConfig struct { + DebugKey string `json:"debugKey"` + Tools map[string]ZarazTool `json:"tools"` + Triggers map[string]ZarazTrigger `json:"triggers"` + ZarazVersion int64 `json:"zarazVersion"` + Consent ZarazConsent `json:"consent,omitempty"` + DataLayer *bool `json:"dataLayer,omitempty"` + Dlp []any `json:"dlp,omitempty"` + HistoryChange *bool `json:"historyChange,omitempty"` + Settings ZarazConfigSettings `json:"settings,omitempty"` + Variables map[string]ZarazVariable `json:"variables,omitempty"` +} + +type ZarazWorker struct { + EscapedWorkerName string `json:"escapedWorkerName"` + WorkerTag string `json:"workerTag"` + MutableId string `json:"mutableId,omitempty"` +} +type ZarazConfigSettings struct { + AutoInjectScript *bool `json:"autoInjectScript"` + InjectIframes *bool `json:"injectIframes,omitempty"` + Ecommerce *bool `json:"ecommerce,omitempty"` + HideQueryParams *bool `json:"hideQueryParams,omitempty"` + HideIpAddress *bool `json:"hideIPAddress,omitempty"` + HideUserAgent *bool `json:"hideUserAgent,omitempty"` + HideExternalReferer *bool `json:"hideExternalReferer,omitempty"` + CookieDomain string `json:"cookieDomain,omitempty"` + InitPath string `json:"initPath,omitempty"` + ScriptPath string `json:"scriptPath,omitempty"` + TrackPath string `json:"trackPath,omitempty"` + EventsApiPath string `json:"eventsApiPath,omitempty"` + McRootPath string `json:"mcRootPath,omitempty"` + ContextEnricher ZarazWorker `json:"contextEnricher,omitempty"` +} + +// Deprecated: To be removed pending migration of existing configs. 
+type ZarazNeoEvent struct { + BlockingTriggers []string `json:"blockingTriggers"` + FiringTriggers []string `json:"firingTriggers"` + Data map[string]any `json:"data"` + ActionType string `json:"actionType,omitempty"` +} + +type ZarazAction struct { + BlockingTriggers []string `json:"blockingTriggers"` + FiringTriggers []string `json:"firingTriggers"` + Data map[string]any `json:"data"` + ActionType string `json:"actionType,omitempty"` +} + +type ZarazToolType string + +const ( + ZarazToolLibrary ZarazToolType = "library" + ZarazToolComponent ZarazToolType = "component" + ZarazToolCustomMc ZarazToolType = "custom-mc" +) + +type ZarazTool struct { + BlockingTriggers []string `json:"blockingTriggers"` + Enabled *bool `json:"enabled"` + DefaultFields map[string]any `json:"defaultFields"` + Name string `json:"name"` + NeoEvents []ZarazNeoEvent `json:"neoEvents"` + Actions map[string]ZarazAction `json:"actions"` + Type ZarazToolType `json:"type"` + DefaultPurpose string `json:"defaultPurpose,omitempty"` + Library string `json:"library,omitempty"` + Component string `json:"component,omitempty"` + Permissions []string `json:"permissions"` + Settings map[string]any `json:"settings"` + Worker ZarazWorker `json:"worker,omitempty"` +} + +type ZarazTriggerSystem string + +const ZarazPageload ZarazTriggerSystem = "pageload" + +type ZarazLoadRuleOp string + +type ZarazRuleType string + +const ( + ZarazClickListener ZarazRuleType = "clickListener" + ZarazTimer ZarazRuleType = "timer" + ZarazFormSubmission ZarazRuleType = "formSubmission" + ZarazVariableMatch ZarazRuleType = "variableMatch" + ZarazScrollDepth ZarazRuleType = "scrollDepth" + ZarazElementVisibility ZarazRuleType = "elementVisibility" + ZarazClientEval ZarazRuleType = "clientEval" +) + +type ZarazSelectorType string + +const ( + ZarazXPath ZarazSelectorType = "xpath" + ZarazCSS ZarazSelectorType = "css" +) + +type ZarazRuleSettings struct { + Type ZarazSelectorType `json:"type,omitempty"` + Selector string `json:"selector,omitempty"` + WaitForTags int `json:"waitForTags,omitempty"` + Interval int `json:"interval,omitempty"` + Limit int `json:"limit,omitempty"` + Validate *bool `json:"validate,omitempty"` + Variable string `json:"variable,omitempty"` + Match string `json:"match,omitempty"` + Positions string `json:"positions,omitempty"` + Op ZarazLoadRuleOp `json:"op,omitempty"` + Value string `json:"value,omitempty"` +} + +type ZarazTriggerRule struct { + Id string `json:"id"` + Match string `json:"match,omitempty"` + Op ZarazLoadRuleOp `json:"op,omitempty"` + Value string `json:"value,omitempty"` + Action ZarazRuleType `json:"action"` + Settings ZarazRuleSettings `json:"settings"` +} + +type ZarazTrigger struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + LoadRules []ZarazTriggerRule `json:"loadRules"` + ExcludeRules []ZarazTriggerRule `json:"excludeRules"` + ClientRules []any `json:"clientRules,omitempty"` // what is this? 
+ System ZarazTriggerSystem `json:"system,omitempty"` +} + +type ZarazVariableType string + +const ( + ZarazVarString ZarazVariableType = "string" + ZarazVarSecret ZarazVariableType = "secret" + ZarazVarWorker ZarazVariableType = "worker" +) + +type ZarazVariable struct { + Name string `json:"name"` + Type ZarazVariableType `json:"type"` + Value interface{} `json:"value"` +} + +type ZarazButtonTextTranslations struct { + AcceptAll map[string]string `json:"accept_all"` + RejectAll map[string]string `json:"reject_all"` + ConfirmMyChoices map[string]string `json:"confirm_my_choices"` +} + +type ZarazPurpose struct { + Name string `json:"name"` + Description string `json:"description"` +} + +type ZarazPurposeWithTranslations struct { + Name map[string]string `json:"name"` + Description map[string]string `json:"description"` + Order int `json:"order"` +} + +type ZarazConsent struct { + Enabled *bool `json:"enabled"` + ButtonTextTranslations ZarazButtonTextTranslations `json:"buttonTextTranslations,omitempty"` + CompanyEmail string `json:"companyEmail,omitempty"` + CompanyName string `json:"companyName,omitempty"` + CompanyStreetAddress string `json:"companyStreetAddress,omitempty"` + ConsentModalIntroHTML string `json:"consentModalIntroHTML,omitempty"` + ConsentModalIntroHTMLWithTranslations map[string]string `json:"consentModalIntroHTMLWithTranslations,omitempty"` + CookieName string `json:"cookieName,omitempty"` + CustomCSS string `json:"customCSS,omitempty"` + CustomIntroDisclaimerDismissed *bool `json:"customIntroDisclaimerDismissed,omitempty"` + DefaultLanguage string `json:"defaultLanguage,omitempty"` + HideModal *bool `json:"hideModal,omitempty"` + Purposes map[string]ZarazPurpose `json:"purposes,omitempty"` + PurposesWithTranslations map[string]ZarazPurposeWithTranslations `json:"purposesWithTranslations,omitempty"` +} + +type ZarazConfigResponse struct { + Result ZarazConfig `json:"result"` + Response +} + +type ZarazWorkflowResponse struct { + Result string `json:"result"` + Response +} + +type ZarazPublishResponse struct { + Result string `json:"result"` + Response +} + +type UpdateZarazConfigParams struct { + DebugKey string `json:"debugKey"` + Tools map[string]ZarazTool `json:"tools"` + Triggers map[string]ZarazTrigger `json:"triggers"` + ZarazVersion int64 `json:"zarazVersion"` + Consent ZarazConsent `json:"consent,omitempty"` + DataLayer *bool `json:"dataLayer,omitempty"` + Dlp []any `json:"dlp,omitempty"` + HistoryChange *bool `json:"historyChange,omitempty"` + Settings ZarazConfigSettings `json:"settings,omitempty"` + Variables map[string]ZarazVariable `json:"variables,omitempty"` +} + +type UpdateZarazWorkflowParams struct { + Workflow string `json:"workflow"` +} + +type PublishZarazConfigParams struct { + Description string `json:"description"` +} + +type ZarazHistoryRecord struct { + ID int64 `json:"id,omitempty"` + UserID string `json:"userId,omitempty"` + Description string `json:"description,omitempty"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` +} + +type ZarazConfigHistoryListResponse struct { + Result []ZarazHistoryRecord `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +type ListZarazConfigHistoryParams struct { + ResultInfo +} + +type GetZarazConfigsByIdResponse = map[string]interface{} + +// listZarazConfigHistoryDefaultPageSize represents the default per_page size of the API. 
+var listZarazConfigHistoryDefaultPageSize int = 100 + +func (api *API) GetZarazConfig(ctx context.Context, rc *ResourceContainer) (ZarazConfigResponse, error) { + if rc.Identifier == "" { + return ZarazConfigResponse{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/config", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZarazConfigResponse{}, err + } + + var recordResp ZarazConfigResponse + err = json.Unmarshal(res, &recordResp) + if err != nil { + return ZarazConfigResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return recordResp, nil +} + +func (api *API) UpdateZarazConfig(ctx context.Context, rc *ResourceContainer, params UpdateZarazConfigParams) (ZarazConfigResponse, error) { + if rc.Identifier == "" { + return ZarazConfigResponse{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/config", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return ZarazConfigResponse{}, err + } + + var updateResp ZarazConfigResponse + err = json.Unmarshal(res, &updateResp) + if err != nil { + return ZarazConfigResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return updateResp, nil +} + +func (api *API) GetZarazWorkflow(ctx context.Context, rc *ResourceContainer) (ZarazWorkflowResponse, error) { + if rc.Identifier == "" { + return ZarazWorkflowResponse{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/workflow", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZarazWorkflowResponse{}, err + } + + var response ZarazWorkflowResponse + err = json.Unmarshal(res, &response) + if err != nil { + return ZarazWorkflowResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +func (api *API) UpdateZarazWorkflow(ctx context.Context, rc *ResourceContainer, params UpdateZarazWorkflowParams) (ZarazWorkflowResponse, error) { + if rc.Identifier == "" { + return ZarazWorkflowResponse{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/workflow", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params.Workflow) + if err != nil { + return ZarazWorkflowResponse{}, err + } + + var response ZarazWorkflowResponse + err = json.Unmarshal(res, &response) + if err != nil { + return ZarazWorkflowResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +func (api *API) PublishZarazConfig(ctx context.Context, rc *ResourceContainer, params PublishZarazConfigParams) (ZarazPublishResponse, error) { + if rc.Identifier == "" { + return ZarazPublishResponse{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/publish", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Description) + if err != nil { + return ZarazPublishResponse{}, err + } + + var response ZarazPublishResponse + err = json.Unmarshal(res, &response) + if err != nil { + return ZarazPublishResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +func (api *API) ListZarazConfigHistory(ctx context.Context, rc *ResourceContainer, params ListZarazConfigHistoryParams) ([]ZarazHistoryRecord, *ResultInfo, error) { + if rc.Identifier == "" { + return nil, nil, ErrMissingZoneID + } + + autoPaginate := true + if params.PerPage >= 1 || params.Page >= 1 { + autoPaginate = false + } + + if 
params.PerPage < 1 { + params.PerPage = listZarazConfigHistoryDefaultPageSize + } + + if params.Page < 1 { + params.Page = 1 + } + + var records []ZarazHistoryRecord + var lastResultInfo ResultInfo + + for { + uri := buildURI(fmt.Sprintf("/zones/%s/settings/zaraz/v2/history", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []ZarazHistoryRecord{}, &ResultInfo{}, err + } + var listResponse ZarazConfigHistoryListResponse + err = json.Unmarshal(res, &listResponse) + if err != nil { + return []ZarazHistoryRecord{}, &ResultInfo{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + records = append(records, listResponse.Result...) + lastResultInfo = listResponse.ResultInfo + params.ResultInfo = listResponse.ResultInfo.Next() + if params.ResultInfo.Done() || !autoPaginate { + break + } + } + return records, &lastResultInfo, nil +} + +func (api *API) GetDefaultZarazConfig(ctx context.Context, rc *ResourceContainer) (ZarazConfigResponse, error) { + if rc.Identifier == "" { + return ZarazConfigResponse{}, ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/default", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZarazConfigResponse{}, err + } + + var recordResp ZarazConfigResponse + err = json.Unmarshal(res, &recordResp) + if err != nil { + return ZarazConfigResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return recordResp, nil +} + +func (api *API) ExportZarazConfig(ctx context.Context, rc *ResourceContainer) error { + if rc.Identifier == "" { + return ErrMissingZoneID + } + + uri := fmt.Sprintf("/zones/%s/settings/zaraz/v2/export", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return err + } + + var recordResp ZarazConfig + err = json.Unmarshal(res, &recordResp) + if err != nil { + return fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/zone.go b/vendor/github.com/cloudflare/cloudflare-go/zone.go new file mode 100644 index 0000000..714c0ed --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/zone.go @@ -0,0 +1,1077 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/goccy/go-json" + + "golang.org/x/net/idna" +) + +var ( + // ErrMissingSettingName is for when setting name is required but missing. + ErrMissingSettingName = errors.New("zone setting name required but missing") +) + +// Owner describes the resource owner. +type Owner struct { + ID string `json:"id"` + Email string `json:"email"` + Name string `json:"name"` + OwnerType string `json:"type"` +} + +// Zone describes a Cloudflare zone. +type Zone struct { + ID string `json:"id"` + Name string `json:"name"` + // DevMode contains the time in seconds until development expires (if + // positive) or since it expired (if negative). It will be 0 if never used. 
+ DevMode int `json:"development_mode"` + OriginalNS []string `json:"original_name_servers"` + OriginalRegistrar string `json:"original_registrar"` + OriginalDNSHost string `json:"original_dnshost"` + CreatedOn time.Time `json:"created_on"` + ModifiedOn time.Time `json:"modified_on"` + NameServers []string `json:"name_servers"` + Owner Owner `json:"owner"` + Permissions []string `json:"permissions"` + Plan ZonePlan `json:"plan"` + PlanPending ZonePlan `json:"plan_pending,omitempty"` + Status string `json:"status"` + Paused bool `json:"paused"` + Type string `json:"type"` + Host struct { + Name string + Website string + } `json:"host"` + VanityNS []string `json:"vanity_name_servers"` + Betas []string `json:"betas"` + DeactReason string `json:"deactivation_reason"` + Meta ZoneMeta `json:"meta"` + Account Account `json:"account"` + VerificationKey string `json:"verification_key"` +} + +// ZoneMeta describes metadata about a zone. +type ZoneMeta struct { + // custom_certificate_quota is broken - sometimes it's a string, sometimes a number! + // CustCertQuota int `json:"custom_certificate_quota"` + PageRuleQuota int `json:"page_rule_quota"` + WildcardProxiable bool `json:"wildcard_proxiable"` + PhishingDetected bool `json:"phishing_detected"` +} + +// ZonePlan contains the plan information for a zone. +type ZonePlan struct { + ZonePlanCommon + LegacyID string `json:"legacy_id"` + IsSubscribed bool `json:"is_subscribed"` + CanSubscribe bool `json:"can_subscribe"` + LegacyDiscount bool `json:"legacy_discount"` + ExternallyManaged bool `json:"externally_managed"` +} + +// ZoneRatePlan contains the plan information for a zone. +type ZoneRatePlan struct { + ZonePlanCommon + Components []zoneRatePlanComponents `json:"components,omitempty"` +} + +// ZonePlanCommon contains fields used by various Plan endpoints. +type ZonePlanCommon struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Price int `json:"price,omitempty"` + Currency string `json:"currency,omitempty"` + Frequency string `json:"frequency,omitempty"` +} + +type zoneRatePlanComponents struct { + Name string `json:"name"` + Default int `json:"Default"` + UnitPrice int `json:"unit_price"` +} + +// ZoneID contains only the zone ID. +type ZoneID struct { + ID string `json:"id"` +} + +// ZoneResponse represents the response from the Zone endpoint containing a single zone. +type ZoneResponse struct { + Response + Result Zone `json:"result"` +} + +// ZonesResponse represents the response from the Zone endpoint containing an array of zones. +type ZonesResponse struct { + Response + Result []Zone `json:"result"` + ResultInfo `json:"result_info"` +} + +// ZoneIDResponse represents the response from the Zone endpoint, containing only a zone ID. +type ZoneIDResponse struct { + Response + Result ZoneID `json:"result"` +} + +// AvailableZoneRatePlansResponse represents the response from the Available Rate Plans endpoint. +type AvailableZoneRatePlansResponse struct { + Response + Result []ZoneRatePlan `json:"result"` + ResultInfo `json:"result_info"` +} + +// AvailableZonePlansResponse represents the response from the Available Plans endpoint. +type AvailableZonePlansResponse struct { + Response + Result []ZonePlan `json:"result"` + ResultInfo +} + +// ZoneRatePlanResponse represents the response from the Plan Details endpoint. +type ZoneRatePlanResponse struct { + Response + Result ZoneRatePlan `json:"result"` +} + +// ZoneSetting contains settings for a zone. 
+type ZoneSetting struct { + ID string `json:"id"` + Editable bool `json:"editable"` + ModifiedOn string `json:"modified_on,omitempty"` + Value interface{} `json:"value"` + TimeRemaining int `json:"time_remaining"` + NextScheduledScan string `json:"next_scheduled_scan,omitempty"` +} + +// ZoneSettingResponse represents the response from the Zone Setting endpoint. +type ZoneSettingResponse struct { + Response + Result []ZoneSetting `json:"result"` +} + +// ZoneSettingSingleResponse represents the response from the Zone Setting endpoint for the specified setting. +type ZoneSettingSingleResponse struct { + Response + Result ZoneSetting `json:"result"` +} + +// ZoneSSLSetting contains ssl setting for a zone. +type ZoneSSLSetting struct { + ID string `json:"id"` + Editable bool `json:"editable"` + ModifiedOn string `json:"modified_on"` + Value string `json:"value"` + CertificateStatus string `json:"certificate_status"` +} + +// ZoneSSLSettingResponse represents the response from the Zone SSL Setting +type ZoneSSLSettingResponse struct { + Response + Result ZoneSSLSetting `json:"result"` +} + +// ZoneAnalyticsData contains totals and timeseries analytics data for a zone. +type ZoneAnalyticsData struct { + Totals ZoneAnalytics `json:"totals"` + Timeseries []ZoneAnalytics `json:"timeseries"` +} + +// zoneAnalyticsDataResponse represents the response from the Zone Analytics Dashboard endpoint. +type zoneAnalyticsDataResponse struct { + Response + Result ZoneAnalyticsData `json:"result"` +} + +// ZoneAnalyticsColocation contains analytics data by datacenter. +type ZoneAnalyticsColocation struct { + ColocationID string `json:"colo_id"` + Timeseries []ZoneAnalytics `json:"timeseries"` +} + +// zoneAnalyticsColocationResponse represents the response from the Zone Analytics By Co-location endpoint. +type zoneAnalyticsColocationResponse struct { + Response + Result []ZoneAnalyticsColocation `json:"result"` +} + +// ZoneAnalytics contains analytics data for a zone. +type ZoneAnalytics struct { + Since time.Time `json:"since"` + Until time.Time `json:"until"` + Requests struct { + All int `json:"all"` + Cached int `json:"cached"` + Uncached int `json:"uncached"` + ContentType map[string]int `json:"content_type"` + Country map[string]int `json:"country"` + SSL struct { + Encrypted int `json:"encrypted"` + Unencrypted int `json:"unencrypted"` + } `json:"ssl"` + HTTPStatus map[string]int `json:"http_status"` + } `json:"requests"` + Bandwidth struct { + All int `json:"all"` + Cached int `json:"cached"` + Uncached int `json:"uncached"` + ContentType map[string]int `json:"content_type"` + Country map[string]int `json:"country"` + SSL struct { + Encrypted int `json:"encrypted"` + Unencrypted int `json:"unencrypted"` + } `json:"ssl"` + } `json:"bandwidth"` + Threats struct { + All int `json:"all"` + Country map[string]int `json:"country"` + Type map[string]int `json:"type"` + } `json:"threats"` + Pageviews struct { + All int `json:"all"` + SearchEngines map[string]int `json:"search_engines"` + } `json:"pageviews"` + Uniques struct { + All int `json:"all"` + } +} + +// ZoneAnalyticsOptions represents the optional parameters in Zone Analytics +// endpoint requests. +type ZoneAnalyticsOptions struct { + Since *time.Time + Until *time.Time + Continuous *bool +} + +// PurgeCacheRequest represents the request format made to the purge endpoint. +type PurgeCacheRequest struct { + Everything bool `json:"purge_everything,omitempty"` + // Purge by filepath (exact match). 
Limit of 30 + Files []string `json:"files,omitempty"` + // Purge by Tag (Enterprise only): + // https://support.cloudflare.com/hc/en-us/articles/206596608-How-to-Purge-Cache-Using-Cache-Tags-Enterprise-only- + Tags []string `json:"tags,omitempty"` + // Purge by hostname - e.g. "assets.example.com" + Hosts []string `json:"hosts,omitempty"` + // Purge by prefix - e.g. "example.com/css" + Prefixes []string `json:"prefixes,omitempty"` +} + +// PurgeCacheResponse represents the response from the purge endpoint. +type PurgeCacheResponse struct { + Response + Result struct { + ID string `json:"id"` + } `json:"result"` +} + +// newZone describes a new zone. +type newZone struct { + Name string `json:"name"` + JumpStart bool `json:"jump_start"` + Type string `json:"type"` + // We use a pointer to get a nil type when the field is empty. + // This allows us to completely omit this with json.Marshal(). + Account *Account `json:"organization,omitempty"` +} + +// FallbackOrigin describes a fallback origin. +type FallbackOrigin struct { + Value string `json:"value"` + ID string `json:"id,omitempty"` +} + +// FallbackOriginResponse represents the response from the fallback_origin endpoint. +type FallbackOriginResponse struct { + Response + Result FallbackOrigin `json:"result"` +} + +// zoneSubscriptionRatePlanPayload is used to build the JSON payload for +// setting a particular rate plan on an existing zone. +type zoneSubscriptionRatePlanPayload struct { + RatePlan struct { + ID string `json:"id"` + } `json:"rate_plan"` +} + +type GetZoneSettingParams struct { + Name string `json:"-"` + PathPrefix string `json:"-"` +} + +type UpdateZoneSettingParams struct { + Name string `json:"-"` + PathPrefix string `json:"-"` + Value interface{} `json:"value"` +} + +// CreateZone creates a zone on an account. +// +// Setting jumpstart to true will attempt to automatically scan for existing +// DNS records. Setting this to false will create the zone with no DNS records. +// +// If account is non-empty, it must have at least the ID field populated. +// This will add the new zone to the specified multi-user account. +// +// API reference: https://api.cloudflare.com/#zone-create-a-zone +func (api *API) CreateZone(ctx context.Context, name string, jumpstart bool, account Account, zoneType string) (Zone, error) { + var newzone newZone + newzone.Name = name + newzone.JumpStart = jumpstart + if account.ID != "" { + newzone.Account = &account + } + + if zoneType == "partial" { + newzone.Type = "partial" + } else { + newzone.Type = "full" + } + + res, err := api.makeRequestContext(ctx, http.MethodPost, "/zones", newzone) + if err != nil { + return Zone{}, err + } + + var r ZoneResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Zone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ZoneActivationCheck initiates another zone activation check for newly-created zones. +// +// API reference: https://api.cloudflare.com/#zone-initiate-another-zone-activation-check +func (api *API) ZoneActivationCheck(ctx context.Context, zoneID string) (Response, error) { + res, err := api.makeRequestContext(ctx, http.MethodPut, "/zones/"+zoneID+"/activation_check", nil) + if err != nil { + return Response{}, err + } + var r Response + err = json.Unmarshal(res, &r) + if err != nil { + return Response{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// ListZones lists zones on an account. Optionally takes a list of zone names +// to filter against. 
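+//
+// Usage sketch (not part of the upstream source); "example.com" stands in for a
+// zone name on the account:
+//
+//	zones, err := api.ListZones(ctx, "example.com")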
+// +// API reference: https://api.cloudflare.com/#zone-list-zones +func (api *API) ListZones(ctx context.Context, z ...string) ([]Zone, error) { + var zones []Zone + if len(z) > 0 { + var ( + v = url.Values{} + r ZonesResponse + ) + for _, zone := range z { + v.Set("name", normalizeZoneName(zone)) + res, err := api.makeRequestContext(ctx, http.MethodGet, "/zones?"+v.Encode(), nil) + if err != nil { + return []Zone{}, err + } + err = json.Unmarshal(res, &r) + if err != nil { + return []Zone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + if !r.Success { + // TODO: Provide an actual error message instead of always returning nil + return []Zone{}, err + } + zones = append(zones, r.Result...) + } + } else { + res, err := api.ListZonesContext(ctx) + if err != nil { + return nil, err + } + + zones = res.Result + } + + return zones, nil +} + +const listZonesPerPage = 50 + +// listZonesFetch fetches one page of zones. +// This is placed as a separate function to prevent any possibility of unintended capturing. +func (api *API) listZonesFetch(ctx context.Context, wg *sync.WaitGroup, errc chan error, + path string, pageSize int, buf []Zone) { + defer wg.Done() + + // recordError sends the error to errc in a non-blocking manner + recordError := func(err error) { + select { + case errc <- err: + default: + } + } + + res, err := api.makeRequestContext(ctx, http.MethodGet, path, nil) + if err != nil { + recordError(err) + return + } + + var r ZonesResponse + err = json.Unmarshal(res, &r) + if err != nil { + recordError(err) + return + } + + if len(r.Result) != pageSize { + recordError(errors.New(errResultInfo)) + return + } + + copy(buf, r.Result) +} + +// ListZonesContext lists all zones on an account automatically handling the +// pagination. Optionally takes a list of ReqOptions. +func (api *API) ListZonesContext(ctx context.Context, opts ...ReqOption) (r ZonesResponse, err error) { + opt := reqOption{ + params: url.Values{}, + } + for _, of := range opts { + of(&opt) + } + + if opt.params.Get("page") != "" || opt.params.Get("per_page") != "" { + return ZonesResponse{}, errors.New(errManualPagination) + } + + opt.params.Add("per_page", strconv.Itoa(listZonesPerPage)) + + res, err := api.makeRequestContext(ctx, http.MethodGet, "/zones?"+opt.params.Encode(), nil) + if err != nil { + return ZonesResponse{}, err + } + err = json.Unmarshal(res, &r) + if err != nil { + return ZonesResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + // avoid overhead in most common cases where the total #zones <= 50 + if r.TotalPages < 2 { + return r, nil + } + + // parameters of pagination + var ( + totalPageCount = r.TotalPages + totalCount = r.Total + + // zones is a large slice to prevent resizing during concurrent writes. + zones = make([]Zone, totalCount) + ) + + // Copy the first page into zones. + copy(zones, r.Result) + + var wg sync.WaitGroup + wg.Add(totalPageCount - 1) // all pages except the first one. + errc := make(chan error, 1) // getting the first error + + // Creating all the workers. + for pageNum := 2; pageNum <= totalPageCount; pageNum++ { + // Note: URL.Values is just a map[string], so this would override the existing 'page' + opt.params.Set("page", strconv.Itoa(pageNum)) + + // start is the first index in the zone buffer + start := listZonesPerPage * (pageNum - 1) + + pageSize := listZonesPerPage + if pageNum == totalPageCount { + // The size of the last page (which would be <= 50). 
+ pageSize = totalCount - start + } + + go api.listZonesFetch(ctx, &wg, errc, "/zones?"+opt.params.Encode(), pageSize, zones[start:]) + } + + wg.Wait() + + select { + case err := <-errc: // if there were any errors + return ZonesResponse{}, err + default: // if there were no errors, the receive statement should block + r.Result = zones + return r, nil + } +} + +// ZoneDetails fetches information about a zone. +// +// API reference: https://api.cloudflare.com/#zone-zone-details +func (api *API) ZoneDetails(ctx context.Context, zoneID string) (Zone, error) { + res, err := api.makeRequestContext(ctx, http.MethodGet, "/zones/"+zoneID, nil) + if err != nil { + return Zone{}, err + } + var r ZoneResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Zone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ZoneOptions is a subset of Zone, for editable options. +type ZoneOptions struct { + Paused *bool `json:"paused,omitempty"` + VanityNS []string `json:"vanity_name_servers,omitempty"` + Plan *ZonePlan `json:"plan,omitempty"` + Type string `json:"type,omitempty"` +} + +// ZoneSetPaused pauses Cloudflare service for the entire zone, sending all +// traffic direct to the origin. +func (api *API) ZoneSetPaused(ctx context.Context, zoneID string, paused bool) (Zone, error) { + zoneopts := ZoneOptions{Paused: &paused} + zone, err := api.EditZone(ctx, zoneID, zoneopts) + if err != nil { + return Zone{}, err + } + + return zone, nil +} + +// ZoneSetType toggles the type for an existing zone. +// +// Valid values for `type` are "full" and "partial" +// +// API reference: https://api.cloudflare.com/#zone-edit-zone +func (api *API) ZoneSetType(ctx context.Context, zoneID string, zoneType string) (Zone, error) { + zoneopts := ZoneOptions{Type: zoneType} + zone, err := api.EditZone(ctx, zoneID, zoneopts) + if err != nil { + return Zone{}, err + } + + return zone, nil +} + +// ZoneSetVanityNS sets custom nameservers for the zone. +// These names must be within the same zone. +func (api *API) ZoneSetVanityNS(ctx context.Context, zoneID string, ns []string) (Zone, error) { + zoneopts := ZoneOptions{VanityNS: ns} + zone, err := api.EditZone(ctx, zoneID, zoneopts) + if err != nil { + return Zone{}, err + } + + return zone, nil +} + +// ZoneSetPlan sets the rate plan of an existing zone. +// +// Valid values for `planType` are "CF_FREE", "CF_PRO", "CF_BIZ" and +// "CF_ENT". +// +// API reference: https://api.cloudflare.com/#zone-subscription-create-zone-subscription +func (api *API) ZoneSetPlan(ctx context.Context, zoneID string, planType string) error { + zonePayload := zoneSubscriptionRatePlanPayload{} + zonePayload.RatePlan.ID = planType + + uri := fmt.Sprintf("/zones/%s/subscription", zoneID) + + _, err := api.makeRequestContext(ctx, http.MethodPost, uri, zonePayload) + if err != nil { + return err + } + + return nil +} + +// ZoneUpdatePlan updates the rate plan of an existing zone. +// +// Valid values for `planType` are "CF_FREE", "CF_PRO", "CF_BIZ" and +// "CF_ENT". +// +// API reference: https://api.cloudflare.com/#zone-subscription-update-zone-subscription +func (api *API) ZoneUpdatePlan(ctx context.Context, zoneID string, planType string) error { + zonePayload := zoneSubscriptionRatePlanPayload{} + zonePayload.RatePlan.ID = planType + + uri := fmt.Sprintf("/zones/%s/subscription", zoneID) + + _, err := api.makeRequestContext(ctx, http.MethodPut, uri, zonePayload) + if err != nil { + return err + } + + return nil +} + +// EditZone edits the given zone. 
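+//
+// Illustrative sketch (not upstream code); the zone ID is a placeholder and the
+// paused flag is just one example of an editable option:
+//
+//	paused := true
+//	zone, err := api.EditZone(ctx, "<zone-id>", cloudflare.ZoneOptions{Paused: &paused})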
+// +// This is usually called by ZoneSetPaused, ZoneSetType, or ZoneSetVanityNS. +// +// API reference: https://api.cloudflare.com/#zone-edit-zone-properties +func (api *API) EditZone(ctx context.Context, zoneID string, zoneOpts ZoneOptions) (Zone, error) { + res, err := api.makeRequestContext(ctx, http.MethodPatch, "/zones/"+zoneID, zoneOpts) + if err != nil { + return Zone{}, err + } + var r ZoneResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Zone{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// PurgeEverything purges the cache for the given zone. +// +// Note: this will substantially increase load on the origin server for that +// zone if there is a high cached vs. uncached request ratio. +// +// API reference: https://api.cloudflare.com/#zone-purge-all-files +func (api *API) PurgeEverything(ctx context.Context, zoneID string) (PurgeCacheResponse, error) { + uri := fmt.Sprintf("/zones/%s/purge_cache", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, PurgeCacheRequest{true, nil, nil, nil, nil}) + if err != nil { + return PurgeCacheResponse{}, err + } + var r PurgeCacheResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PurgeCacheResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// PurgeCache purges the cache using the given PurgeCacheRequest (zone/url/tag). +// +// API reference: https://api.cloudflare.com/#zone-purge-individual-files-by-url-and-cache-tags +func (api *API) PurgeCache(ctx context.Context, zoneID string, pcr PurgeCacheRequest) (PurgeCacheResponse, error) { + return api.PurgeCacheContext(ctx, zoneID, pcr) +} + +// PurgeCacheContext purges the cache using the given PurgeCacheRequest (zone/url/tag). +// +// API reference: https://api.cloudflare.com/#zone-purge-individual-files-by-url-and-cache-tags +func (api *API) PurgeCacheContext(ctx context.Context, zoneID string, pcr PurgeCacheRequest) (PurgeCacheResponse, error) { + // manually build the payload to ensure we don't escape HTML entities to + // match their keys for purging. + payload, err := json.MarshalWithOption(pcr, json.DisableHTMLEscape()) + if err != nil { + return PurgeCacheResponse{}, err + } + + uri := fmt.Sprintf("/zones/%s/purge_cache", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, payload) + if err != nil { + return PurgeCacheResponse{}, err + } + var r PurgeCacheResponse + err = json.Unmarshal(res, &r) + if err != nil { + return PurgeCacheResponse{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r, nil +} + +// DeleteZone deletes the given zone. +// +// API reference: https://api.cloudflare.com/#zone-delete-a-zone +func (api *API) DeleteZone(ctx context.Context, zoneID string) (ZoneID, error) { + res, err := api.makeRequestContext(ctx, http.MethodDelete, "/zones/"+zoneID, nil) + if err != nil { + return ZoneID{}, err + } + var r ZoneIDResponse + err = json.Unmarshal(res, &r) + if err != nil { + return ZoneID{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// AvailableZoneRatePlans returns information about all plans available to the specified zone. 
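+//
+// Usage sketch (not upstream code); the zone ID is a placeholder:
+//
+//	plans, err := api.AvailableZoneRatePlans(ctx, "<zone-id>")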
+// +// API reference: https://api.cloudflare.com/#zone-plan-available-plans +func (api *API) AvailableZoneRatePlans(ctx context.Context, zoneID string) ([]ZoneRatePlan, error) { + uri := fmt.Sprintf("/zones/%s/available_rate_plans", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []ZoneRatePlan{}, err + } + var r AvailableZoneRatePlansResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []ZoneRatePlan{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// AvailableZonePlans returns information about all plans available to the specified zone. +// +// API reference: https://api.cloudflare.com/#zone-rate-plan-list-available-plans +func (api *API) AvailableZonePlans(ctx context.Context, zoneID string) ([]ZonePlan, error) { + uri := fmt.Sprintf("/zones/%s/available_plans", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []ZonePlan{}, err + } + var r AvailableZonePlansResponse + err = json.Unmarshal(res, &r) + if err != nil { + return []ZonePlan{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// encode encodes non-nil fields into URL encoded form. +func (o ZoneAnalyticsOptions) encode() string { + v := url.Values{} + if o.Since != nil { + v.Set("since", o.Since.Format(time.RFC3339)) + } + if o.Until != nil { + v.Set("until", o.Until.Format(time.RFC3339)) + } + if o.Continuous != nil { + v.Set("continuous", fmt.Sprintf("%t", *o.Continuous)) + } + return v.Encode() +} + +// ZoneAnalyticsDashboard returns zone analytics information. +// +// API reference: https://api.cloudflare.com/#zone-analytics-dashboard +func (api *API) ZoneAnalyticsDashboard(ctx context.Context, zoneID string, options ZoneAnalyticsOptions) (ZoneAnalyticsData, error) { + uri := fmt.Sprintf("/zones/%s/analytics/dashboard?%s", zoneID, options.encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZoneAnalyticsData{}, err + } + var r zoneAnalyticsDataResponse + err = json.Unmarshal(res, &r) + if err != nil { + return ZoneAnalyticsData{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ZoneAnalyticsByColocation returns zone analytics information by datacenter. +// +// API reference: https://api.cloudflare.com/#zone-analytics-analytics-by-co-locations +func (api *API) ZoneAnalyticsByColocation(ctx context.Context, zoneID string, options ZoneAnalyticsOptions) ([]ZoneAnalyticsColocation, error) { + uri := fmt.Sprintf("/zones/%s/analytics/colos?%s", zoneID, options.encode()) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + var r zoneAnalyticsColocationResponse + err = json.Unmarshal(res, &r) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// ZoneSettings returns all of the settings for a given zone. 
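+//
+// Usage sketch (not upstream code); the zone ID is a placeholder:
+//
+//	settings, err := api.ZoneSettings(ctx, "<zone-id>")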
+// +// API reference: https://api.cloudflare.com/#zone-settings-get-all-zone-settings +func (api *API) ZoneSettings(ctx context.Context, zoneID string) (*ZoneSettingResponse, error) { + uri := fmt.Sprintf("/zones/%s/settings", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + response := &ZoneSettingResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// UpdateZoneSettings updates the settings for a given zone. +// +// API reference: https://api.cloudflare.com/#zone-settings-edit-zone-settings-info +func (api *API) UpdateZoneSettings(ctx context.Context, zoneID string, settings []ZoneSetting) (*ZoneSettingResponse, error) { + uri := fmt.Sprintf("/zones/%s/settings", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, struct { + Items []ZoneSetting `json:"items"` + }{settings}) + if err != nil { + return nil, err + } + + response := &ZoneSettingResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response, nil +} + +// ZoneSSLSettings returns information about SSL setting to the specified zone. +// +// API reference: https://api.cloudflare.com/#zone-settings-get-ssl-setting +func (api *API) ZoneSSLSettings(ctx context.Context, zoneID string) (ZoneSSLSetting, error) { + uri := fmt.Sprintf("/zones/%s/settings/ssl", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZoneSSLSetting{}, err + } + var r ZoneSSLSettingResponse + err = json.Unmarshal(res, &r) + if err != nil { + return ZoneSSLSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateZoneSSLSettings update information about SSL setting to the specified zone. +// +// API reference: https://api.cloudflare.com/#zone-settings-change-ssl-setting +func (api *API) UpdateZoneSSLSettings(ctx context.Context, zoneID string, sslValue string) (ZoneSSLSetting, error) { + uri := fmt.Sprintf("/zones/%s/settings/ssl", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, ZoneSSLSetting{Value: sslValue}) + if err != nil { + return ZoneSSLSetting{}, err + } + var r ZoneSSLSettingResponse + err = json.Unmarshal(res, &r) + if err != nil { + return ZoneSSLSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// FallbackOrigin returns information about the fallback origin for the specified zone. +// +// API reference: https://developers.cloudflare.com/ssl/ssl-for-saas/api-calls/#fallback-origin-configuration +func (api *API) FallbackOrigin(ctx context.Context, zoneID string) (FallbackOrigin, error) { + uri := fmt.Sprintf("/zones/%s/fallback_origin", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return FallbackOrigin{}, err + } + + var r FallbackOriginResponse + err = json.Unmarshal(res, &r) + if err != nil { + return FallbackOrigin{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +// UpdateFallbackOrigin updates the fallback origin for a given zone. 
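+//
+// Illustrative sketch (not upstream code); the zone ID and hostname are placeholders:
+//
+//	resp, err := api.UpdateFallbackOrigin(ctx, "<zone-id>",
+//		cloudflare.FallbackOrigin{Value: "fallback.example.com"})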
+//
+// API reference: https://developers.cloudflare.com/ssl/ssl-for-saas/api-calls/#4-example-patch-to-change-fallback-origin
+func (api *API) UpdateFallbackOrigin(ctx context.Context, zoneID string, fbo FallbackOrigin) (*FallbackOriginResponse, error) {
+	uri := fmt.Sprintf("/zones/%s/fallback_origin", zoneID)
+	res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, fbo)
+	if err != nil {
+		return nil, err
+	}
+
+	response := &FallbackOriginResponse{}
+	err = json.Unmarshal(res, &response)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return response, nil
+}
+
+// normalizeZoneName tries to convert IDNs (international domain names)
+// from Punycode to Unicode form. If the given zone name is not represented
+// as Punycode, or converting fails (for invalid representations), it
+// is returned unchanged.
+//
+// Because all the zone name comparison is currently done using the API service
+// (except for comparison with the empty string), theoretically, we could
+// remove this function from the Go library. However, there should be no harm in
+// calling this function other than a negligible performance penalty.
+//
+// Note: conversion errors are silently discarded.
+func normalizeZoneName(name string) string {
+	if n, err := idna.ToUnicode(name); err == nil {
+		return n
+	}
+	return name
+}
+
+// GetZoneSetting returns information about the specified setting for the
+// specified zone.
+//
+// API reference: https://api.cloudflare.com/#zone-settings-get-all-zone-settings
+func (api *API) GetZoneSetting(ctx context.Context, rc *ResourceContainer, params GetZoneSettingParams) (ZoneSetting, error) {
+	if rc.Level != ZoneRouteLevel {
+		return ZoneSetting{}, ErrRequiredZoneLevelResourceContainer
+	}
+
+	if rc.Identifier == "" {
+		return ZoneSetting{}, ErrMissingName
+	}
+
+	pathPrefix := "settings"
+	if params.PathPrefix != "" {
+		pathPrefix = params.PathPrefix
+	}
+
+	uri := fmt.Sprintf("/zones/%s/%s/%s", rc.Identifier, pathPrefix, params.Name)
+	res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
+	if err != nil {
+		return ZoneSetting{}, err
+	}
+	var r ZoneSettingSingleResponse
+	err = json.Unmarshal(res, &r)
+	if err != nil {
+		return ZoneSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+	return r.Result, nil
+}
+
+// UpdateZoneSetting updates the specified setting for a given zone.
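+//
+// Illustrative sketch (not upstream code); the zone ID is a placeholder and
+// "always_online"/"on" are just one possible setting name and value:
+//
+//	setting, err := api.UpdateZoneSetting(ctx, cloudflare.ZoneIdentifier("<zone-id>"),
+//		cloudflare.UpdateZoneSettingParams{Name: "always_online", Value: "on"})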
+// +// API reference: https://api.cloudflare.com/#zone-settings-edit-zone-settings-info +func (api *API) UpdateZoneSetting(ctx context.Context, rc *ResourceContainer, params UpdateZoneSettingParams) (ZoneSetting, error) { + if rc.Level != ZoneRouteLevel { + return ZoneSetting{}, ErrRequiredZoneLevelResourceContainer + } + + if rc.Identifier == "" { + return ZoneSetting{}, ErrMissingName + } + + pathPrefix := "settings" + if params.PathPrefix != "" { + pathPrefix = params.PathPrefix + } + + uri := fmt.Sprintf("/zones/%s/%s/%s", rc.Identifier, pathPrefix, params.Name) + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params) + if err != nil { + return ZoneSetting{}, err + } + + response := &ZoneSettingSingleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneSetting{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// ZoneExport returns the text BIND config for the given zone +// +// API reference: https://api.cloudflare.com/#dns-records-for-a-zone-export-dns-records +func (api *API) ZoneExport(ctx context.Context, zoneID string) (string, error) { + res, err := api.makeRequestContext(ctx, http.MethodGet, "/zones/"+zoneID+"/dns_records/export", nil) + if err != nil { + return "", err + } + return string(res), nil +} + +// ZoneDNSSECResponse represents the response from the Zone DNSSEC Setting. +type ZoneDNSSECResponse struct { + Response + Result ZoneDNSSEC `json:"result"` +} + +// ZoneDNSSEC represents the response from the Zone DNSSEC Setting result. +type ZoneDNSSEC struct { + Status string `json:"status"` + Flags int `json:"flags"` + Algorithm string `json:"algorithm"` + KeyType string `json:"key_type"` + DigestType string `json:"digest_type"` + DigestAlgorithm string `json:"digest_algorithm"` + Digest string `json:"digest"` + DS string `json:"ds"` + KeyTag int `json:"key_tag"` + PublicKey string `json:"public_key"` + ModifiedOn time.Time `json:"modified_on"` +} + +// ZoneDNSSECSetting returns the DNSSEC details of a zone +// +// API reference: https://api.cloudflare.com/#dnssec-dnssec-details +func (api *API) ZoneDNSSECSetting(ctx context.Context, zoneID string) (ZoneDNSSEC, error) { + res, err := api.makeRequestContext(ctx, http.MethodGet, "/zones/"+zoneID+"/dnssec", nil) + if err != nil { + return ZoneDNSSEC{}, err + } + response := ZoneDNSSECResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneDNSSEC{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// ZoneDNSSECDeleteResponse represents the response from the Zone DNSSEC Delete request. +type ZoneDNSSECDeleteResponse struct { + Response + Result string `json:"result"` +} + +// DeleteZoneDNSSEC deletes DNSSEC for zone +// +// API reference: https://api.cloudflare.com/#dnssec-delete-dnssec-records +func (api *API) DeleteZoneDNSSEC(ctx context.Context, zoneID string) (string, error) { + res, err := api.makeRequestContext(ctx, http.MethodDelete, "/zones/"+zoneID+"/dnssec", nil) + if err != nil { + return "", err + } + response := ZoneDNSSECDeleteResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return "", fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response.Result, nil +} + +// ZoneDNSSECUpdateOptions represents the options for DNSSEC update. 
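GetZoneSetting and UpdateZoneSetting above take a *ResourceContainer rather than a bare zone ID. A short sketch, assuming a configured *cloudflare.API client and a placeholder zone ID:

package main

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func tightenSSL(ctx context.Context, api *cloudflare.API, zoneID string) error {
	rc := cloudflare.ZoneIdentifier(zoneID)

	// Read one setting by name.
	current, err := api.GetZoneSetting(ctx, rc, cloudflare.GetZoneSettingParams{Name: "ssl"})
	if err != nil {
		return err
	}
	fmt.Printf("ssl is currently %v\n", current.Value)

	// Patch the same setting to a stricter value.
	_, err = api.UpdateZoneSetting(ctx, rc, cloudflare.UpdateZoneSettingParams{
		Name:  "ssl",
		Value: "strict",
	})
	return err
}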
+type ZoneDNSSECUpdateOptions struct { + Status string `json:"status"` +} + +// UpdateZoneDNSSEC updates DNSSEC for a zone +// +// API reference: https://api.cloudflare.com/#dnssec-edit-dnssec-status +func (api *API) UpdateZoneDNSSEC(ctx context.Context, zoneID string, options ZoneDNSSECUpdateOptions) (ZoneDNSSEC, error) { + res, err := api.makeRequestContext(ctx, http.MethodPatch, "/zones/"+zoneID+"/dnssec", options) + if err != nil { + return ZoneDNSSEC{}, err + } + response := ZoneDNSSECResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneDNSSEC{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/zone_cache_variants.go b/vendor/github.com/cloudflare/cloudflare-go/zone_cache_variants.go new file mode 100644 index 0000000..101f388 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/zone_cache_variants.go @@ -0,0 +1,89 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +type ZoneCacheVariantsValues struct { + Avif []string `json:"avif,omitempty"` + Bmp []string `json:"bmp,omitempty"` + Gif []string `json:"gif,omitempty"` + Jpeg []string `json:"jpeg,omitempty"` + Jpg []string `json:"jpg,omitempty"` + Jpg2 []string `json:"jpg2,omitempty"` + Jp2 []string `json:"jp2,omitempty"` + Png []string `json:"png,omitempty"` + Tiff []string `json:"tiff,omitempty"` + Tif []string `json:"tif,omitempty"` + Webp []string `json:"webp,omitempty"` +} + +type ZoneCacheVariants struct { + ModifiedOn time.Time `json:"modified_on"` + Value ZoneCacheVariantsValues `json:"value"` +} + +type updateZoneCacheVariantsRequest struct { + Value ZoneCacheVariantsValues `json:"value"` +} + +type zoneCacheVariantsSingleResponse struct { + Response + Result ZoneCacheVariants `json:"result"` +} + +// ZoneCacheVariants returns information about the current cache variants +// +// API reference: https://api.cloudflare.com/#zone-cache-settings-get-variants-setting +func (api *API) ZoneCacheVariants(ctx context.Context, zoneID string) (ZoneCacheVariants, error) { + uri := fmt.Sprintf("/zones/%s/cache/variants", zoneID) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZoneCacheVariants{}, err + } + var r zoneCacheVariantsSingleResponse + err = json.Unmarshal(res, &r) + if err != nil { + return ZoneCacheVariants{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateZoneCacheVariants updates the cache variants for a given zone. +// +// API reference: https://api.cloudflare.com/#zone-cache-settings-change-variants-setting +func (api *API) UpdateZoneCacheVariants(ctx context.Context, zoneID string, variants ZoneCacheVariantsValues) (ZoneCacheVariants, error) { + uri := fmt.Sprintf("/zones/%s/cache/variants", zoneID) + + updateReq := updateZoneCacheVariantsRequest{Value: variants} + res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, updateReq) + if err != nil { + return ZoneCacheVariants{}, err + } + + response := &zoneCacheVariantsSingleResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneCacheVariants{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// DeleteZoneCacheVariants deletes cache variants for a given zone. 
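A brief sketch tying together the DNSSEC and cache-variants helpers above, again assuming a configured *cloudflare.API client and a placeholder zone ID:

package main

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func hardenZone(ctx context.Context, api *cloudflare.API, zoneID string) error {
	// Enable DNSSEC for the zone.
	dnssec, err := api.UpdateZoneDNSSEC(ctx, zoneID, cloudflare.ZoneDNSSECUpdateOptions{Status: "active"})
	if err != nil {
		return err
	}
	fmt.Println("DNSSEC status:", dnssec.Status)

	// Serve WebP variants when JPEG or PNG assets are requested.
	variants, err := api.UpdateZoneCacheVariants(ctx, zoneID, cloudflare.ZoneCacheVariantsValues{
		Jpeg: []string{"image/webp"},
		Png:  []string{"image/webp"},
	})
	if err != nil {
		return err
	}
	fmt.Println("cache variants last modified:", variants.ModifiedOn)
	return nil
}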
+// +// API reference: https://api.cloudflare.com/#zone-cache-settings-delete-variants-setting +func (api *API) DeleteZoneCacheVariants(ctx context.Context, zoneID string) error { + uri := fmt.Sprintf("/zones/%s/cache/variants", zoneID) + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/zone_hold.go b/vendor/github.com/cloudflare/cloudflare-go/zone_hold.go new file mode 100644 index 0000000..c7f30cc --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/zone_hold.go @@ -0,0 +1,111 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +// Retrieve whether the zone is subject to a zone hold, and metadata about the +// hold. +type ZoneHold struct { + Hold *bool `json:"hold,omitempty"` + IncludeSubdomains *bool `json:"include_subdomains,omitempty"` + HoldAfter *time.Time `json:"hold_after,omitempty"` +} + +// ZoneHoldResponse represents a response from the Zone Hold endpoint. +type ZoneHoldResponse struct { + Result ZoneHold `json:"result"` + Response + ResultInfo `json:"result_info"` +} + +// CreateZoneHoldParams represents params for the Create Zone Hold +// endpoint. +type CreateZoneHoldParams struct { + IncludeSubdomains *bool `url:"include_subdomains,omitempty"` +} + +// DeleteZoneHoldParams represents params for the Delete Zone Hold +// endpoint. +type DeleteZoneHoldParams struct { + HoldAfter *time.Time `url:"hold_after,omitempty"` +} + +type GetZoneHoldParams struct{} + +// CreateZoneHold enforces a zone hold on the zone, blocking the creation and +// activation of zone. +// +// API reference: https://developers.cloudflare.com/api/operations/zones-0-hold-post +func (api *API) CreateZoneHold(ctx context.Context, rc *ResourceContainer, params CreateZoneHoldParams) (ZoneHold, error) { + if rc.Level != ZoneRouteLevel { + return ZoneHold{}, ErrRequiredZoneLevelResourceContainer + } + + uri := buildURI(fmt.Sprintf("/zones/%s/hold", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil) + if err != nil { + return ZoneHold{}, err + } + + response := &ZoneHoldResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneHold{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// DeleteZoneHold removes enforcement of a zone hold on the zone, permanently or +// temporarily, allowing the creation and activation of zones with this hostname. +// +// API reference:https://developers.cloudflare.com/api/operations/zones-0-hold-delete +func (api *API) DeleteZoneHold(ctx context.Context, rc *ResourceContainer, params DeleteZoneHoldParams) (ZoneHold, error) { + if rc.Level != ZoneRouteLevel { + return ZoneHold{}, ErrRequiredZoneLevelResourceContainer + } + + uri := buildURI(fmt.Sprintf("/zones/%s/hold", rc.Identifier), params) + res, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + if err != nil { + return ZoneHold{}, err + } + + response := &ZoneHoldResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneHold{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} + +// GetZoneHold retrieves whether the zone is subject to a zone hold, and the +// metadata about the hold. 
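CreateZoneHold and DeleteZoneHold above both accept optional, pointer-valued parameters. A hedged sketch, assuming a configured *cloudflare.API client and a placeholder zone ID:

package main

import (
	"context"
	"fmt"
	"time"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func holdThenRelease(ctx context.Context, api *cloudflare.API, zoneID string) error {
	rc := cloudflare.ZoneIdentifier(zoneID)

	// Enforce a hold that also covers subdomains.
	includeSubdomains := true
	hold, err := api.CreateZoneHold(ctx, rc, cloudflare.CreateZoneHoldParams{
		IncludeSubdomains: &includeSubdomains,
	})
	if err != nil {
		return err
	}
	fmt.Printf("hold active: %v\n", hold.Hold != nil && *hold.Hold)

	// Lift the hold, but keep it in force for another 24 hours.
	holdAfter := time.Now().Add(24 * time.Hour)
	_, err = api.DeleteZoneHold(ctx, rc, cloudflare.DeleteZoneHoldParams{HoldAfter: &holdAfter})
	return err
}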
+// +// API reference: https://developers.cloudflare.com/api/operations/zones-0-hold-get +func (api *API) GetZoneHold(ctx context.Context, rc *ResourceContainer, params GetZoneHoldParams) (ZoneHold, error) { + if rc.Level != ZoneRouteLevel { + return ZoneHold{}, ErrRequiredZoneLevelResourceContainer + } + + uri := fmt.Sprintf("/zones/%s/hold", rc.Identifier) + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return ZoneHold{}, err + } + + response := &ZoneHoldResponse{} + err = json.Unmarshal(res, &response) + if err != nil { + return ZoneHold{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return response.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/zt_risk_behaviors.go b/vendor/github.com/cloudflare/cloudflare-go/zt_risk_behaviors.go new file mode 100644 index 0000000..370e3c3 --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/zt_risk_behaviors.go @@ -0,0 +1,126 @@ +package cloudflare + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/goccy/go-json" +) + +// Behavior represents a single zt risk behavior config. +type Behavior struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + RiskLevel RiskLevel `json:"risk_level"` + Enabled *bool `json:"enabled"` +} + +// Wrapper used to have full-fidelity repro of json structure. +type Behaviors struct { + Behaviors map[string]Behavior `json:"behaviors"` +} + +// BehaviorResponse represents the response from the zt risk scoring endpoint +// and contains risk behaviors for an account. +type BehaviorResponse struct { + Success bool `json:"success"` + Result Behaviors `json:"result"` + Errors []string `json:"errors"` + Messages []string `json:"messages"` +} + +// Behaviors returns all zero trust risk scoring behaviors for the provided account +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-get-behaviors +func (api *API) Behaviors(ctx context.Context, accountID string) (Behaviors, error) { + uri := fmt.Sprintf("/accounts/%s/zt_risk_scoring/behaviors", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return Behaviors{}, err + } + + var r BehaviorResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Behaviors{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + return r.Result, nil +} + +// UpdateBehaviors returns all zero trust risk scoring behaviors for the provided account +// NOTE: description/name updates are no-ops, risk_level [low medium high] and enabled [true/false] results in modifications +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-put-behaviors +func (api *API) UpdateBehaviors(ctx context.Context, accountID string, behaviors Behaviors) (Behaviors, error) { + uri := fmt.Sprintf("/accounts/%s/zt_risk_scoring/behaviors", accountID) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, behaviors) + if err != nil { + return Behaviors{}, err + } + + var r BehaviorResponse + err = json.Unmarshal(res, &r) + if err != nil { + return Behaviors{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return r.Result, nil +} + +type RiskLevel int + +const ( + _ RiskLevel = iota + Low + Medium + High +) + +func (p RiskLevel) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} + +func (p RiskLevel) String() string { + return [...]string{"low", "medium", "high"}[p-1] +} + +func (p *RiskLevel) UnmarshalJSON(data []byte) error { + 
var ( + s string + err error + ) + err = json.Unmarshal(data, &s) + if err != nil { + return err + } + v, err := RiskLevelFromString(s) + if err != nil { + return err + } + *p = *v + return nil +} + +func RiskLevelFromString(s string) (*RiskLevel, error) { + s = strings.ToLower(s) + var v RiskLevel + switch s { + case "low": + v = Low + case "medium": + v = Medium + case "high": + v = High + default: + return nil, fmt.Errorf("unknown variant for risk level: %s", s) + } + return &v, nil +} + +func (p RiskLevel) IntoRef() *RiskLevel { + return &p +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/zt_risk_score_integration.go b/vendor/github.com/cloudflare/cloudflare-go/zt_risk_score_integration.go new file mode 100644 index 0000000..a80ef1d --- /dev/null +++ b/vendor/github.com/cloudflare/cloudflare-go/zt_risk_score_integration.go @@ -0,0 +1,180 @@ +package cloudflare + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/goccy/go-json" +) + +var ( + ErrMissingRiskScoreIntegrationID = errors.New("missing required Risk Score Integration UUID") +) + +// Represents a Risk Score Sharing Integration. +// If enabled, Cloudflare will share changes in user risk score with the specified vendor. +type RiskScoreIntegration struct { + ID string `json:"id,omitempty"` + IntegrationType string `json:"integration_type,omitempty"` + TenantUrl string `json:"tenant_url,omitempty"` + ReferenceID string `json:"reference_id,omitempty"` + AccountTag string `json:"account_tag,omitempty"` + WellKnownUrl string `json:"well_known_url,omitempty"` + Active *bool `json:"active,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` +} + +// The properties required to create a new risk score integration. +type RiskScoreIntegrationCreateRequest struct { + IntegrationType string `json:"integration_type,omitempty"` + TenantUrl string `json:"tenant_url,omitempty"` + ReferenceID string `json:"reference_id,omitempty"` +} + +// The properties required to update a risk score integration. +type RiskScoreIntegrationUpdateRequest struct { + IntegrationType string `json:"integration_type,omitempty"` + TenantUrl string `json:"tenant_url,omitempty"` + ReferenceID string `json:"reference_id,omitempty"` + Active *bool `json:"active,omitempty"` +} + +// A list of risk score integrations as returned from the API. +type RiskScoreIntegrationListResponse struct { + Result []RiskScoreIntegration `json:"result"` + Response +} + +// A risk score integration as returned from the API. +type RiskScoreIntegrationResponse struct { + Result RiskScoreIntegration `json:"result"` + Response +} + +type ListRiskScoreIntegrationParams struct{} + +// GetRiskScoreIntegration returns a single Risk Score Integration by its ID. 
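Because Behavior carries a pointer-valued Enabled flag and the RiskLevel enum defined above, updating risk behaviors means mutating the map returned by Behaviors and sending it back. A minimal sketch, assuming a configured *cloudflare.API client, a placeholder account ID, and a hypothetical behavior key:

package main

import (
	"context"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func raiseBehaviorRisk(ctx context.Context, api *cloudflare.API, accountID string) error {
	behaviors, err := api.Behaviors(ctx, accountID)
	if err != nil {
		return err
	}

	// Only Enabled and RiskLevel are honored by the update endpoint;
	// name and description edits are no-ops.
	const key = "high_dlp" // hypothetical behavior key
	if b, ok := behaviors.Behaviors[key]; ok {
		enabled := true
		b.Enabled = &enabled
		b.RiskLevel = cloudflare.High
		behaviors.Behaviors[key] = b

		if _, err := api.UpdateBehaviors(ctx, accountID, behaviors); err != nil {
			return err
		}
	}
	return nil
}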
+// +// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-integration-get +func (api *API) GetRiskScoreIntegration(ctx context.Context, rc *ResourceContainer, integrationID string) (RiskScoreIntegration, error) { + if rc.Identifier == "" { + return RiskScoreIntegration{}, ErrMissingResourceIdentifier + } + + if integrationID == "" { + return RiskScoreIntegration{}, ErrMissingRiskScoreIntegrationID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/zt_risk_scoring/integrations/%s", rc.Level, rc.Identifier, integrationID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return RiskScoreIntegration{}, err + } + + var integrationResponse RiskScoreIntegrationResponse + err = json.Unmarshal(res, &integrationResponse) + if err != nil { + return RiskScoreIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return integrationResponse.Result, nil +} + +// ListRiskScoreIntegrations returns all Risk Score Integrations for an account. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-integration-list +func (api *API) ListRiskScoreIntegrations(ctx context.Context, rc *ResourceContainer, params ListRiskScoreIntegrationParams) ([]RiskScoreIntegration, error) { + if rc.Identifier == "" { + return []RiskScoreIntegration{}, ErrMissingResourceIdentifier + } + + uri := buildURI(fmt.Sprintf("/%s/%s/zt_risk_scoring/integrations", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return []RiskScoreIntegration{}, err + } + + var integrationsResponse RiskScoreIntegrationListResponse + err = json.Unmarshal(res, &integrationsResponse) + if err != nil { + return []RiskScoreIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return integrationsResponse.Result, nil +} + +// CreateRiskScoreIntegration creates a new Risk Score Integration. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-integration-create +func (api *API) CreateRiskScoreIntegration(ctx context.Context, rc *ResourceContainer, params RiskScoreIntegrationCreateRequest) (RiskScoreIntegration, error) { + if rc.Identifier == "" { + return RiskScoreIntegration{}, ErrMissingResourceIdentifier + } + + uri := buildURI(fmt.Sprintf("/%s/%s/zt_risk_scoring/integrations", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return RiskScoreIntegration{}, err + } + + var integrationResponse RiskScoreIntegrationResponse + err = json.Unmarshal(res, &integrationResponse) + if err != nil { + return RiskScoreIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return integrationResponse.Result, nil +} + +// UpdateRiskScoreIntegration updates a Risk Score Integration. +// +// Can be used to disable or active the integration using the "active" boolean property. 
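A sketch of creating an integration and later deactivating it with the Active flag mentioned above, assuming a configured *cloudflare.API client, a placeholder account ID, and a hypothetical vendor tenant (UpdateRiskScoreIntegration is defined just below):

package main

import (
	"context"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func shareRiskScores(ctx context.Context, api *cloudflare.API, accountID string) error {
	rc := cloudflare.AccountIdentifier(accountID)

	// Create an integration that shares risk score changes with a vendor tenant.
	integration, err := api.CreateRiskScoreIntegration(ctx, rc, cloudflare.RiskScoreIntegrationCreateRequest{
		IntegrationType: "Okta",                     // hypothetical vendor
		TenantUrl:       "https://example.okta.com", // hypothetical tenant URL
	})
	if err != nil {
		return err
	}
	fmt.Println("created integration", integration.ID)

	// Later, switch it off without deleting it.
	active := false
	_, err = api.UpdateRiskScoreIntegration(ctx, rc, integration.ID, cloudflare.RiskScoreIntegrationUpdateRequest{
		IntegrationType: integration.IntegrationType,
		TenantUrl:       integration.TenantUrl,
		Active:          &active,
	})
	return err
}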
+// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-integration-update +func (api *API) UpdateRiskScoreIntegration(ctx context.Context, rc *ResourceContainer, integrationID string, params RiskScoreIntegrationUpdateRequest) (RiskScoreIntegration, error) { + if rc.Identifier == "" { + return RiskScoreIntegration{}, ErrMissingResourceIdentifier + } + + if integrationID == "" { + return RiskScoreIntegration{}, ErrMissingRiskScoreIntegrationID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/zt_risk_scoring/integrations/%s", rc.Level, rc.Identifier, integrationID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params) + if err != nil { + return RiskScoreIntegration{}, err + } + + var integrationResponse RiskScoreIntegrationResponse + err = json.Unmarshal(res, &integrationResponse) + if err != nil { + return RiskScoreIntegration{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return integrationResponse.Result, nil +} + +// DeleteRiskScoreIntegration deletes a Risk Score Integration. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-zt-risk-score-integration-delete +func (api *API) DeleteRiskScoreIntegration(ctx context.Context, rc *ResourceContainer, integrationID string) error { + if rc.Identifier == "" { + return ErrMissingResourceIdentifier + } + + if integrationID == "" { + return ErrMissingRiskScoreIntegrationID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/zt_risk_scoring/integrations/%s", rc.Level, rc.Identifier, integrationID), nil) + + _, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil) + return err +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..bc52e96 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..7929947 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. 
+ t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..205c28d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..1be8ce9 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. 
+ DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
+ +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. 
+ + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..f78d89f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. 
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..b04edb7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/goccy/go-json/.codecov.yml b/vendor/github.com/goccy/go-json/.codecov.yml new file mode 100644 index 0000000..e981345 --- /dev/null +++ b/vendor/github.com/goccy/go-json/.codecov.yml @@ -0,0 +1,32 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + default: + target: 70% + threshold: 2% + patch: off + changes: no + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "header,diff" + behavior: default + require_changes: no + +ignore: + - internal/encoder/vm_color + - internal/encoder/vm_color_indent diff --git a/vendor/github.com/goccy/go-json/.gitignore b/vendor/github.com/goccy/go-json/.gitignore new file mode 100644 index 0000000..3782838 --- /dev/null +++ b/vendor/github.com/goccy/go-json/.gitignore @@ -0,0 +1,2 @@ +cover.html +cover.out diff --git a/vendor/github.com/goccy/go-json/.golangci.yml b/vendor/github.com/goccy/go-json/.golangci.yml new file mode 100644 index 0000000..977acca --- /dev/null +++ b/vendor/github.com/goccy/go-json/.golangci.yml @@ -0,0 +1,86 @@ +run: + skip-files: + - encode_optype.go + - ".*_test\\.go$" + +linters-settings: + govet: + enable-all: true + disable: + - shadow + +linters: + enable-all: true + disable: + - dogsled + - dupl + - exhaustive + - exhaustivestruct + - errorlint + - forbidigo + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofumpt + - gomnd + - gosec + - ifshort + - lll + - makezero + - nakedret + - nestif + - nlreturn + - paralleltest + - testpackage + - thelper + - wrapcheck + - interfacer + - lll + - nakedret + - nestif + - nlreturn + - testpackage + - wsl + - varnamelen + - nilnil + - ireturn + - govet + - forcetypeassert + - cyclop + - containedctx + - revive + - nosnakecase + - exhaustruct + - depguard + +issues: + exclude-rules: + # not needed + - path: /*.go + text: "ST1003: should not use underscores in package names" + linters: + - stylecheck + - path: /*.go + text: "don't use an underscore in package name" + linters: + - golint + - path: rtype.go + linters: + - golint + - stylecheck + - path: error.go + linters: + - staticcheck + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
+ max-same-issues: 0 diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md new file mode 100644 index 0000000..d09bb89 --- /dev/null +++ b/vendor/github.com/goccy/go-json/CHANGELOG.md @@ -0,0 +1,425 @@ +# v0.10.2 - 2023/03/20 + +### New features + +* Support DebugDOT option for debugging encoder ( #440 ) + +### Fix bugs + +* Fix combination of embedding structure and omitempty option ( #442 ) + +# v0.10.1 - 2023/03/13 + +### Fix bugs + +* Fix checkptr error for array decoder ( #415 ) +* Fix added buffer size check when decoding key ( #430 ) +* Fix handling of anonymous fields other than struct ( #431 ) +* Fix to not optimize when lower conversion can't handle byte-by-byte ( #432 ) +* Fix a problem that MarshalIndent does not work when UnorderedMap is specified ( #435 ) +* Fix mapDecoder.DecodeStream() for empty objects containing whitespace ( #425 ) +* Fix an issue that could not set the correct NextField for fields in the embedded structure ( #438 ) + +# v0.10.0 - 2022/11/29 + +### New features + +* Support JSON Path ( #250 ) + +### Fix bugs + +* Fix marshaler for map's key ( #409 ) + +# v0.9.11 - 2022/08/18 + +### Fix bugs + +* Fix unexpected behavior when buffer ends with backslash ( #383 ) +* Fix stream decoding of escaped character ( #387 ) + +# v0.9.10 - 2022/07/15 + +### Fix bugs + +* Fix boundary exception of type caching ( #382 ) + +# v0.9.9 - 2022/07/15 + +### Fix bugs + +* Fix encoding of directed interface with typed nil ( #377 ) +* Fix embedded primitive type encoding using alias ( #378 ) +* Fix slice/array type encoding with types implementing MarshalJSON ( #379 ) +* Fix unicode decoding when the expected buffer state is not met after reading ( #380 ) + +# v0.9.8 - 2022/06/30 + +### Fix bugs + +* Fix decoding of surrogate-pair ( #365 ) +* Fix handling of embedded primitive type ( #366 ) +* Add validation of escape sequence for decoder ( #367 ) +* Fix stream tokenizing respecting UseNumber ( #369 ) +* Fix encoding when struct pointer type that implements Marshal JSON is embedded ( #375 ) + +### Improve performance + +* Improve performance of linkRecursiveCode ( #368 ) + +# v0.9.7 - 2022/04/22 + +### Fix bugs + +#### Encoder + +* Add filtering process for encoding on slow path ( #355 ) +* Fix encoding of interface{} with pointer type ( #363 ) + +#### Decoder + +* Fix map key decoder that implements UnmarshalJSON ( #353 ) +* Fix decoding of []uint8 type ( #361 ) + +### New features + +* Add DebugWith option for encoder ( #356 ) + +# v0.9.6 - 2022/03/22 + +### Fix bugs + +* Correct the handling of the minimum value of int type for decoder ( #344 ) +* Fix bugs of stream decoder's bufferSize ( #349 ) +* Add a guard to use typeptr more safely ( #351 ) + +### Improve decoder performance + +* Improve escapeString's performance ( #345 ) + +### Others + +* Update go version for CI ( #347 ) + +# v0.9.5 - 2022/03/04 + +### Fix bugs + +* Fix panic when decoding time.Time with context ( #328 ) +* Fix reading the next character in buffer to nul consideration ( #338 ) +* Fix incorrect handling on skipValue ( #341 ) + +### Improve decoder performance + +* Improve performance when a payload contains escape sequence ( #334 ) + +# v0.9.4 - 2022/01/21 + +* Fix IsNilForMarshaler for string type with omitempty ( #323 ) +* Fix the case where the embedded field is at the end ( #326 ) + +# v0.9.3 - 2022/01/14 + +* Fix logic of removing struct field for decoder ( #322 ) + +# v0.9.2 - 2022/01/14 + +* Add invalid decoder to delay type error judgment at 
decode ( #321 ) + +# v0.9.1 - 2022/01/11 + +* Fix encoding of MarshalText/MarshalJSON operation with head offset ( #319 ) + +# v0.9.0 - 2022/01/05 + +### New feature + +* Supports dynamic filtering of struct fields ( #314 ) + +### Improve encoding performance + +* Improve map encoding performance ( #310 ) +* Optimize encoding path for escaped string ( #311 ) +* Add encoding option for performance ( #312 ) + +### Fix bugs + +* Fix panic at encoding map value on 1.18 ( #310 ) +* Fix MarshalIndent for interface type ( #317 ) + +# v0.8.1 - 2021/12/05 + +* Fix operation conversion from PtrHead to Head in Recursive type ( #305 ) + +# v0.8.0 - 2021/12/02 + +* Fix embedded field conflict behavior ( #300 ) +* Refactor compiler for encoder ( #301 #302 ) + +# v0.7.10 - 2021/10/16 + +* Fix conversion from pointer to uint64 ( #294 ) + +# v0.7.9 - 2021/09/28 + +* Fix encoding of nil value about interface type that has method ( #291 ) + +# v0.7.8 - 2021/09/01 + +* Fix mapassign_faststr for indirect struct type ( #283 ) +* Fix encoding of not empty interface type ( #284 ) +* Fix encoding of empty struct interface type ( #286 ) + +# v0.7.7 - 2021/08/25 + +* Fix invalid utf8 on stream decoder ( #279 ) +* Fix buffer length bug on string stream decoder ( #280 ) + +Thank you @orisano !! + +# v0.7.6 - 2021/08/13 + +* Fix nil slice assignment ( #276 ) +* Improve error message ( #277 ) + +# v0.7.5 - 2021/08/12 + +* Fix encoding of embedded struct with tags ( #265 ) +* Fix encoding of embedded struct that isn't first field ( #272 ) +* Fix decoding of binary type with escaped char ( #273 ) + +# v0.7.4 - 2021/07/06 + +* Fix encoding of indirect layout structure ( #264 ) + +# v0.7.3 - 2021/06/29 + +* Fix encoding of pointer type in empty interface ( #262 ) + +# v0.7.2 - 2021/06/26 + +### Fix decoder + +* Add decoder for func type to fix decoding of nil function value ( #257 ) +* Fix stream decoding of []byte type ( #258 ) + +### Performance + +* Improve decoding performance of map[string]interface{} type ( use `mapassign_faststr` ) ( #256 ) +* Improve encoding performance of empty interface type ( remove recursive calling of `vm.Run` ) ( #259 ) + +### Benchmark + +* Add bytedance/sonic as benchmark target ( #254 ) + +# v0.7.1 - 2021/06/18 + +### Fix decoder + +* Fix error when unmarshal empty array ( #253 ) + +# v0.7.0 - 2021/06/12 + +### Support context for MarshalJSON and UnmarshalJSON ( #248 ) + +* json.MarshalContext(context.Context, interface{}, ...json.EncodeOption) ([]byte, error) +* json.NewEncoder(io.Writer).EncodeContext(context.Context, interface{}, ...json.EncodeOption) error +* json.UnmarshalContext(context.Context, []byte, interface{}, ...json.DecodeOption) error +* json.NewDecoder(io.Reader).DecodeContext(context.Context, interface{}) error + +```go +type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} +``` + +### Add DecodeFieldPriorityFirstWin option ( #242 ) + +In the default behavior, go-json, like encoding/json, will reflect the result of the last evaluation when a field with the same name exists. I've added new options to allow you to change this behavior. `json.DecodeFieldPriorityFirstWin` option reflects the result of the first evaluation if a field with the same name exists. This behavior has a performance advantage as it allows the subsequent strings to be skipped if all fields have been evaluated. 
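+
+A minimal usage sketch (assuming the option is passed to `json.UnmarshalWithOption` like the other decode options; the payload and variable names are illustrative, not taken from the changelog):
+
+```go
+var v struct {
+	A string `json:"a"`
+}
+// With DecodeFieldPriorityFirstWin the first "a" is kept; by default the last one wins.
+if err := json.UnmarshalWithOption(
+	[]byte(`{"a":"first","a":"second"}`), &v,
+	json.DecodeFieldPriorityFirstWin(),
+); err != nil {
+	// handle error
+}
+fmt.Println(v.A) // expected output: first
+```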
+ +### Fix encoder + +* Fix indent number contains recursive type ( #249 ) +* Fix encoding of using empty interface as map key ( #244 ) + +### Fix decoder + +* Fix decoding fields containing escaped characters ( #237 ) + +### Refactor + +* Move some tests to subdirectory ( #243 ) +* Refactor package layout for decoder ( #238 ) + +# v0.6.1 - 2021/06/02 + +### Fix encoder + +* Fix value of totalLength for encoding ( #236 ) + +# v0.6.0 - 2021/06/01 + +### Support Colorize option for encoding (#233) + +```go +b, err := json.MarshalWithOption(v, json.Colorize(json.DefaultColorScheme)) +if err != nil { + ... +} +fmt.Println(string(b)) // print colored json +``` + +### Refactor + +* Fix opcode layout - Adjust memory layout of the opcode to 128 bytes in a 64-bit environment ( #230 ) +* Refactor encode option ( #231 ) +* Refactor escape string ( #232 ) + +# v0.5.1 - 2021/5/20 + +### Optimization + +* Add type addrShift to enable bigger encoder/decoder cache ( #213 ) + +### Fix decoder + +* Keep original reference of slice element ( #229 ) + +### Refactor + +* Refactor Debug mode for encoding ( #226 ) +* Generate VM sources for encoding ( #227 ) +* Refactor validator for null/true/false for decoding ( #221 ) + +# v0.5.0 - 2021/5/9 + +### Supports using omitempty and string tags at the same time ( #216 ) + +### Fix decoder + +* Fix stream decoder for unicode char ( #215 ) +* Fix decoding of slice element ( #219 ) +* Fix calculating of buffer length for stream decoder ( #220 ) + +### Refactor + +* replace skipWhiteSpace goto by loop ( #212 ) + +# v0.4.14 - 2021/5/4 + +### Benchmark + +* Add valyala/fastjson to benchmark ( #193 ) +* Add benchmark task for CI ( #211 ) + +### Fix decoder + +* Fix decoding of slice with unmarshal json type ( #198 ) +* Fix decoding of null value for interface type that does not implement Unmarshaler ( #205 ) +* Fix decoding of null value to []byte by json.Unmarshal ( #206 ) +* Fix decoding of backslash char at the end of string ( #207 ) +* Fix stream decoder for null/true/false value ( #208 ) +* Fix stream decoder for slow reader ( #211 ) + +### Performance + +* If cap of slice is enough, reuse slice data for compatibility with encoding/json ( #200 ) + +# v0.4.13 - 2021/4/20 + +### Fix json.Compact and json.Indent + +* Support validation the input buffer for json.Compact and json.Indent ( #189 ) +* Optimize json.Compact and json.Indent ( improve memory footprint ) ( #190 ) + +# v0.4.12 - 2021/4/15 + +### Fix encoder + +* Fix unnecessary indent for empty slice type ( #181 ) +* Fix encoding of omitempty feature for the slice or interface type ( #183 ) +* Fix encoding custom types zero values with omitempty when marshaller exists ( #187 ) + +### Fix decoder + +* Fix decoder for invalid top level value ( #184 ) +* Fix decoder for invalid number value ( #185 ) + +# v0.4.11 - 2021/4/3 + +* Improve decoder performance for interface type + +# v0.4.10 - 2021/4/2 + +### Fix encoder + +* Fixed a bug when encoding slice and map containing recursive structures +* Fixed a logic to determine if indirect reference + +# v0.4.9 - 2021/3/29 + +### Add debug mode + +If you use `json.MarshalWithOption(v, json.Debug())` and `panic` occurred in `go-json`, produces debug information to console. + +### Support a new feature to compatible with encoding/json + +- invalid UTF-8 is coerced to valid UTF-8 ( without performance down ) + +### Fix encoder + +- Fixed handling of MarshalJSON of function type + +### Fix decoding of slice of pointer type + +If there is a pointer value, go-json will use it. 
(This behavior is necessary to achieve the ability to prioritize pre-filled values). However, since slices are reused internally, there was a bug that referred to the previous pointer value. Therefore, it is not necessary to refer to the pointer value in advance for the slice element, so we explicitly initialize slice element by `nil`. + +# v0.4.8 - 2021/3/21 + +### Reduce memory usage at compile time + +* go-json have used about 2GB of memory at compile time, but now it can compile with about less than 550MB. + +### Fix any encoder's bug + +* Add many test cases for encoder +* Fix composite type ( slice/array/map ) +* Fix pointer types +* Fix encoding of MarshalJSON or MarshalText or json.Number type + +### Refactor encoder + +* Change package layout for reducing memory usage at compile +* Remove anonymous and only operation +* Remove root property from encodeCompileContext and opcode + +### Fix CI + +* Add Go 1.16 +* Remove Go 1.13 +* Fix `make cover` task + +### Number/Delim/Token/RawMessage use the types defined in encoding/json by type alias + +# v0.4.7 - 2021/02/22 + +### Fix decoder + +* Fix decoding of deep recursive structure +* Fix decoding of embedded unexported pointer field +* Fix invalid test case +* Fix decoding of invalid value +* Fix decoding of prefilled value +* Fix not being able to return UnmarshalTypeError when it should be returned +* Fix decoding of null value +* Fix decoding of type of null string +* Use pre allocated pointer if exists it at decoding + +### Reduce memory usage at compile + +* Integrate int/int8/int16/int32/int64 and uint/uint8/uint16/uint32/uint64 operation to reduce memory usage at compile + +### Remove unnecessary optype diff --git a/vendor/github.com/goccy/go-json/LICENSE b/vendor/github.com/goccy/go-json/LICENSE new file mode 100644 index 0000000..6449c8b --- /dev/null +++ b/vendor/github.com/goccy/go-json/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Masaaki Goshima + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile new file mode 100644 index 0000000..c030577 --- /dev/null +++ b/vendor/github.com/goccy/go-json/Makefile @@ -0,0 +1,39 @@ +PKG := github.com/goccy/go-json + +BIN_DIR := $(CURDIR)/bin +PKGS := $(shell go list ./... 
| grep -v internal/cmd|grep -v test) +COVER_PKGS := $(foreach pkg,$(PKGS),$(subst $(PKG),.,$(pkg))) + +COMMA := , +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +COVERPKG_OPT := $(subst $(SPACE),$(COMMA),$(COVER_PKGS)) + +$(BIN_DIR): + @mkdir -p $(BIN_DIR) + +.PHONY: cover +cover: + go test -coverpkg=$(COVERPKG_OPT) -coverprofile=cover.out ./... + +.PHONY: cover-html +cover-html: cover + go tool cover -html=cover.out + +.PHONY: lint +lint: golangci-lint + $(BIN_DIR)/golangci-lint run + +golangci-lint: | $(BIN_DIR) + @{ \ + set -e; \ + GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \ + cd $$GOLANGCI_LINT_TMP_DIR; \ + go mod init tmp; \ + GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2; \ + rm -rf $$GOLANGCI_LINT_TMP_DIR; \ + } + +.PHONY: generate +generate: + go generate ./internal/... diff --git a/vendor/github.com/goccy/go-json/README.md b/vendor/github.com/goccy/go-json/README.md new file mode 100644 index 0000000..7bacc54 --- /dev/null +++ b/vendor/github.com/goccy/go-json/README.md @@ -0,0 +1,529 @@ +# go-json + +![Go](https://github.com/goccy/go-json/workflows/Go/badge.svg) +[![GoDoc](https://godoc.org/github.com/goccy/go-json?status.svg)](https://pkg.go.dev/github.com/goccy/go-json?tab=doc) +[![codecov](https://codecov.io/gh/goccy/go-json/branch/master/graph/badge.svg)](https://codecov.io/gh/goccy/go-json) + +Fast JSON encoder/decoder compatible with encoding/json for Go + + + +# Roadmap + +``` +* version ( expected release date ) + +* v0.9.0 + | + | while maintaining compatibility with encoding/json, we will add convenient APIs + | + v +* v1.0.0 +``` + +We are accepting requests for features that will be implemented between v0.9.0 and v.1.0.0. +If you have the API you need, please submit your issue [here](https://github.com/goccy/go-json/issues). + +# Features + +- Drop-in replacement of `encoding/json` +- Fast ( See [Benchmark section](https://github.com/goccy/go-json#benchmarks) ) +- Flexible customization with options +- Coloring the encoded string +- Can propagate context.Context to `MarshalJSON` or `UnmarshalJSON` +- Can dynamically filter the fields of the structure type-safely + +# Installation + +``` +go get github.com/goccy/go-json +``` + +# How to use + +Replace import statement from `encoding/json` to `github.com/goccy/go-json` + +``` +-import "encoding/json" ++import "github.com/goccy/go-json" +``` + +# JSON library comparison + +| name | encoder | decoder | compatible with `encoding/json` | +| :----: | :------: | :-----: | :-----------------------------: | +| encoding/json | yes | yes | N/A | +| [json-iterator/go](https://github.com/json-iterator/go) | yes | yes | partial | +| [easyjson](https://github.com/mailru/easyjson) | yes | yes | no | +| [gojay](https://github.com/francoispqt/gojay) | yes | yes | no | +| [segmentio/encoding/json](https://github.com/segmentio/encoding/tree/master/json) | yes | yes | partial | +| [jettison](https://github.com/wI2L/jettison) | yes | no | no | +| [simdjson-go](https://github.com/minio/simdjson-go) | no | yes | no | +| goccy/go-json | yes | yes | yes | + +- `json-iterator/go` isn't compatible with `encoding/json` in many ways (e.g. https://github.com/json-iterator/go/issues/229 ), but it hasn't been supported for a long time. +- `segmentio/encoding/json` is well supported for encoders, but some are not supported for decoder APIs such as `Token` ( streaming decode ) + +## Other libraries + +- [jingo](https://github.com/bet365/jingo) + +I tried the benchmark but it didn't work. 
+Also, it seems to panic when it receives an unexpected value because there is no error handling... + +- [ffjson](https://github.com/pquerna/ffjson) + +Benchmarking gave very slow results. +It seems that it is assumed that the user will use the buffer pool properly. +Also, development seems to have already stopped + +# Benchmarks + +``` +$ cd benchmarks +$ go test -bench . +``` + +## Encode + + + + +## Decode + + + + + + +# Fuzzing + +[go-json-fuzz](https://github.com/goccy/go-json-fuzz) is the repository for fuzzing tests. +If you run the test in this repository and find a bug, please commit to corpus to go-json-fuzz and report the issue to [go-json](https://github.com/goccy/go-json/issues). + +# How it works + +`go-json` is very fast in both encoding and decoding compared to other libraries. +It's easier to implement by using automatic code generation for performance or by using a dedicated interface, but `go-json` dares to stick to compatibility with `encoding/json` and is the simple interface. Despite this, we are developing with the aim of being the fastest library. + +Here, we explain the various speed-up techniques implemented by `go-json`. + +## Basic technique + +The techniques listed here are the ones used by most of the libraries listed above. + +### Buffer reuse + +Since the only value required for the result of `json.Marshal(interface{}) ([]byte, error)` is `[]byte`, the only value that must be allocated during encoding is the return value `[]byte` . + +Also, as the number of allocations increases, the performance will be affected, so the number of allocations should be kept as low as possible when creating `[]byte`. + +Therefore, there is a technique to reduce the number of times a new buffer must be allocated by reusing the buffer used for the previous encoding by using `sync.Pool`. + +Finally, you allocate a buffer that is as long as the resulting buffer and copy the contents into it, you only need to allocate the buffer once in theory. + +```go +type buffer struct { + data []byte +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return &buffer{data: make([]byte, 0, 1024)} + }, +} + +buf := bufPool.Get().(*buffer) +data := encode(buf.data) // reuse buf.data + +newBuf := make([]byte, len(data)) +copy(newBuf, buf) + +buf.data = data +bufPool.Put(buf) +``` + +### Elimination of reflection + +As you know, the reflection operation is very slow. + +Therefore, using the fact that the address position where the type information is stored is fixed for each binary ( we call this `typeptr` ), +we can use the address in the type information to call a pre-built optimized process. + +For example, you can get the address to the type information from `interface{}` as follows and you can use that information to call a process that does not have reflection. + +To process without reflection, pass a pointer (`unsafe.Pointer`) to the value is stored. + +```go + +type emptyInterface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +var typeToEncoder = map[uintptr]func(unsafe.Pointer)([]byte, error){} + +func Marshal(v interface{}) ([]byte, error) { + iface := (*emptyInterface)(unsafe.Pointer(&v) + typeptr := uintptr(iface.typ) + if enc, exists := typeToEncoder[typeptr]; exists { + return enc(iface.ptr) + } + ... +} +``` + +※ In reality, `typeToEncoder` can be referenced by multiple goroutines, so exclusive control is required. 
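+
+As a concrete illustration of that exclusive control, a naive thread-safe cache can be built around `sync.Map` (a sketch with hypothetical names, not go-json's actual code; the "Dispatch by typeptr from map to slice" section below explains why go-json ultimately avoids this approach):
+
+```go
+type encoderFunc func(unsafe.Pointer) ([]byte, error)
+
+// encoderCache is effectively a map[uintptr]encoderFunc that is safe for
+// concurrent readers and writers.
+var encoderCache sync.Map
+
+func cachedEncoder(typeptr uintptr, compile func() encoderFunc) encoderFunc {
+	// Fast path: reuse the encoder already built for this type.
+	if enc, ok := encoderCache.Load(typeptr); ok {
+		return enc.(encoderFunc)
+	}
+	// Slow path: build the encoder and publish it. Two goroutines may compile
+	// the same type concurrently, which is harmless for a cache like this.
+	enc := compile()
+	encoderCache.Store(typeptr, enc)
+	return enc
+}
+```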
+ +## Unique speed-up technique + +## Encoder + +### Do not escape arguments of `Marshal` + +`json.Marshal` and `json.Unmarshal` receive `interface{}` value and they perform type determination dynamically to process. +In normal case, you need to use the `reflect` library to determine the type dynamically, but since `reflect.Type` is defined as `interface`, when you call the method of `reflect.Type`, The reflect's argument is escaped. + +Therefore, the arguments for `Marshal` and `Unmarshal` are always escaped to the heap. +However, `go-json` can use the feature of `reflect.Type` while avoiding escaping. + +`reflect.Type` is defined as `interface`, but in reality `reflect.Type` is implemented only by the structure `rtype` defined in the `reflect` package. +For this reason, to date `reflect.Type` is the same as `*reflect.rtype`. + +Therefore, by directly handling `*reflect.rtype`, which is an implementation of `reflect.Type`, it is possible to avoid escaping because it changes from `interface` to using `struct`. + +The technique for working with `*reflect.rtype` directly from `go-json` is implemented at [rtype.go](https://github.com/goccy/go-json/blob/master/internal/runtime/rtype.go) + +Also, the same technique is cut out as a library ( https://github.com/goccy/go-reflect ) + +Initially this feature was the default behavior of `go-json`. +But after careful testing, I found that I passed a large value to `json.Marshal()` and if the argument could not be assigned to the stack, it could not be properly escaped to the heap (a bug in the Go compiler). + +Therefore, this feature will be provided as an **optional** until this issue is resolved. + +To use it, add `NoEscape` like `MarshalNoEscape()` + +### Encoding using opcode sequence + +I explained that you can use `typeptr` to call a pre-built process from type information. + +In other libraries, this dedicated process is processed by making it an function calling like anonymous function, but function calls are inherently slow processes and should be avoided as much as possible. + +Therefore, `go-json` adopted the Instruction-based execution processing system, which is also used to implement virtual machines for programming language. + +If it is the first type to encode, create the opcode ( instruction ) sequence required for encoding. +From the second time onward, use `typeptr` to get the cached pre-built opcode sequence and encode it based on it. An example of the opcode sequence is shown below. + +```go +json.Marshal(struct{ + X int `json:"x"` + Y string `json:"y"` +}{X: 1, Y: "hello"}) +``` + +When encoding a structure like the one above, create a sequence of opcodes like this: + +``` +- opStructFieldHead ( `{` ) +- opStructFieldInt ( `"x": 1,` ) +- opStructFieldString ( `"y": "hello"` ) +- opStructEnd ( `}` ) +- opEnd +``` + +※ When processing each operation, write the letters on the right. + +In addition, each opcode is managed by the following structure ( +Pseudo code ). + +```go +type opType int +const ( + opStructFieldHead opType = iota + opStructFieldInt + opStructFieldStirng + opStructEnd + opEnd +) +type opcode struct { + op opType + key []byte + next *opcode +} +``` + +The process of encoding using the opcode sequence is roughly implemented as follows. + +```go +func encode(code *opcode, b []byte, p unsafe.Pointer) ([]byte, error) { + for { + switch code.op { + case opStructFieldHead: + b = append(b, '{') + code = code.next + case opStructFieldInt: + b = append(b, code.key...) 
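+			// ( assumed ) code.offset holds this field's offset inside the struct, though it is
+			// omitted from the opcode definition above; appendInt formats the int stored at p+offset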
+ b = appendInt((*int)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructFieldString: + b = append(b, code.key...) + b = appendString((*string)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructEnd: + b = append(b, '}') + code = code.next + case opEnd: + goto END + } + } +END: + return b, nil +} +``` + +In this way, the huge `switch-case` is used to encode by manipulating the linked list opcodes to avoid unnecessary function calls. + +### Opcode sequence optimization + +One of the advantages of encoding using the opcode sequence is the ease of optimization. +The opcode sequence mentioned above is actually converted into the following optimized operations and used. + +``` +- opStructFieldHeadInt ( `{"x": 1,` ) +- opStructEndString ( `"y": "hello"}` ) +- opEnd +``` + +It has been reduced from 5 opcodes to 3 opcodes ! +Reducing the number of opcodees means reducing the number of branches with `switch-case`. +In other words, the closer the number of operations is to 1, the faster the processing can be performed. + +In `go-json`, optimization to reduce the number of opcodes itself like the above and it speeds up by preparing opcodes with optimized paths. + +### Change recursive call from CALL to JMP + +Recursive processing is required during encoding if the type is defined recursively as follows: + +```go +type T struct { + X int + U *U +} + +type U struct { + T *T +} + +b, err := json.Marshal(&T{ + X: 1, + U: &U{ + T: &T{ + X: 2, + }, + }, +}) +fmt.Println(string(b)) // {"X":1,"U":{"T":{"X":2,"U":null}}} +``` + +In `go-json`, recursive processing is processed by the operation type of ` opStructFieldRecursive`. + +In this operation, after acquiring the opcode sequence used for recursive processing, the function is **not** called recursively as it is, but the necessary values ​​are saved by itself and implemented by moving to the next operation. + +The technique of implementing recursive processing with the `JMP` operation while avoiding the `CALL` operation is a famous technique for implementing a high-speed virtual machine. + +For more details, please refer to [the article](https://engineering.mercari.com/blog/entry/1599563768-081104c850) ( but Japanese only ). + +### Dispatch by typeptr from map to slice + +When retrieving the data cached from the type information by `typeptr`, we usually use map. +Map requires exclusive control, so use `sync.Map` for a naive implementation. + +However, this is slow, so it's a good idea to use the `atomic` package for exclusive control as implemented by `segmentio/encoding/json` ( https://github.com/segmentio/encoding/blob/master/json/codec.go#L41-L55 ). + +This implementation slows down the set instead of speeding up the get, but it works well because of the nature of the library, it encodes much more for the same type. + +However, as a result of profiling, I noticed that `runtime.mapaccess2` accounts for a significant percentage of the execution time. So I thought if I could change the lookup from map to slice. + +There is an API named `typelinks` defined in the `runtime` package that the `reflect` package uses internally. +This allows you to get all the type information defined in the binary at runtime. + +The fact that all type information can be acquired means that by constructing slices in advance with the acquired total number of type information, it is possible to look up with the value of `typeptr` without worrying about out-of-range access. 
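+
+As a rough sketch, the dispatch then becomes a plain slice read instead of a map lookup. The sketch below assumes the smallest type address ( `baseAddr` ), the address range and an alignment shift have already been derived from the `typelinks` data; the names are illustrative and not `go-json`'s internal ones.
+
+```go
+package main
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+type encoderFunc func(p unsafe.Pointer) ([]byte, error)
+
+var (
+	baseAddr  uintptr // smallest type address found via typelinks
+	addrShift uintptr // alignment shift that keeps the slice compact
+	// sized so that every valid typeptr maps to an in-range index:
+	// index = (typeptr - baseAddr) >> addrShift
+	cachedEncoders []encoderFunc
+)
+
+func encoderForType(typeptr uintptr) encoderFunc {
+	// hot path: one subtraction, one shift, one slice read; no hashing, no lock
+	return cachedEncoders[(typeptr-baseAddr)>>addrShift]
+}
+
+func main() {
+	// toy setup: pretend there are four type addresses, 16 bytes apart
+	baseAddr, addrShift = 0x1000, 4
+	cachedEncoders = make([]encoderFunc, 4)
+	cachedEncoders[(0x1010-baseAddr)>>addrShift] = func(p unsafe.Pointer) ([]byte, error) {
+		return []byte(fmt.Sprint(*(*int)(p))), nil
+	}
+	v := 7
+	b, _ := encoderForType(0x1010)(unsafe.Pointer(&v))
+	fmt.Println(string(b)) // 7
+}
+```
+
+Compared to the map-based cache, the read side needs no synchronization at all, which is exactly why this layout pays off when the same types are encoded over and over.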
+ +However, if there is too much type information, it will use a lot of memory, so by default we will only use this optimization if the slice size fits within **2Mib** . + +If this approach is not available, it will fall back to the `atomic` based process described above. + +If you want to know more, please refer to the implementation [here](https://github.com/goccy/go-json/blob/master/internal/runtime/type.go#L36-L100) + +## Decoder + +### Dispatch by typeptr from map to slice + +Like the encoder, the decoder also uses typeptr to call the dedicated process. + +### Faster termination character inspection using NUL character + +In order to decode, you have to traverse the input buffer character by position. +At that time, if you check whether the buffer has reached the end, it will be very slow. + +`buf` : `[]byte` type variable. holds the string passed to the decoder +`cursor` : `int64` type variable. holds the current read position + +```go +buflen := len(buf) +for ; cursor < buflen; cursor++ { // compare cursor and buflen at all times, it is so slow. + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + } +} +``` + +Therefore, by adding the `NUL` (`\000`) character to the end of the read buffer as shown below, it is possible to check the termination character at the same time as other characters. + +```go +for { + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Use Boundary Check Elimination + +Due to the `NUL` character optimization, the Go compiler does a boundary check every time, even though `buf[cursor]` does not cause out-of-range access. + +Therefore, `go-json` eliminates boundary check by fetching characters for hotspot by pointer operation. For example, the following code. + +```go +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +p := (*sliceHeader)(&unsafe.Pointer(buf)).data +for { + switch char(p, cursor) { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Checking the existence of fields of struct using Bitmaps + +I found by the profiling result, in the struct decode, lookup process for field was taking a long time. + +For example, consider decoding a string like `{"a":1,"b":2,"c":3}` into the following structure: + +```go +type T struct { + A int `json:"a"` + B int `json:"b"` + C int `json:"c"` +} +``` + +At this time, it was found that it takes a lot of time to acquire the decoding process corresponding to the field from the field name as shown below during the decoding process. + +```go +fieldName := decodeKey(buf, cursor) // "a" or "b" or "c" +decoder, exists := fieldToDecoderMap[fieldName] // so slow +if exists { + decoder(buf, cursor) +} else { + skipValue(buf, cursor) +} +``` + +To improve this process, `json-iterator/go` is optimized so that it can be branched by switch-case when the number of fields in the structure is 10 or less (switch-case is faster than map). However, there is a risk of hash collision because the value hashed by the FNV algorithm is used for conditional branching. Also, `gojay` processes this part at high speed by letting the library user yourself write `switch-case`. + + +`go-json` considers and implements a new approach that is different from these. I call this **bitmap field optimization**. + +The range of values ​​per character can be represented by `[256]byte`. 
Also, if the number of fields in the structure is 8 or less, `int8` type can represent the state of each field. +In other words, it has the following structure. + +- Base ( 8bit ): `00000000` +- Key "a": `00000001` ( assign key "a" to the first bit ) +- Key "b": `00000010` ( assign key "b" to the second bit ) +- Key "c": `00000100` ( assign key "c" to the third bit ) + +Bitmap structure is the following + +``` + | key index(0) | +------------------------ + 0 | 00000000 | + 1 | 00000000 | +~~ | | +97 (a) | 00000001 | +98 (b) | 00000010 | +99 (c) | 00000100 | +~~ | | +255 | 00000000 | +``` + +You can think of this as a Bitmap with a height of `256` and a width of the maximum string length in the field name. +In other words, it can be represented by the following type . + +```go +[maxFieldKeyLength][256]int8 +``` + +When decoding a field character, check whether the corresponding character exists by referring to the pre-built bitmap like the following. + +```go +var curBit int8 = math.MaxInt8 // 11111111 + +c := char(buf, cursor) +bit := bitmap[keyIdx][c] +curBit &= bit +if curBit == 0 { + // not found field +} +``` + +If `curBit` is not `0` until the end of the field string, then the string is +You may have hit one of the fields. +But the possibility is that if the decoded string is shorter than the field string, you will get a false hit. + +- input: `{"a":1}` +```go +type T struct { + X int `json:"abc"` +} +``` +※ Since `a` is shorter than `abc`, it can decode to the end of the field character without `curBit` being 0. + +Rest assured. In this case, it doesn't matter because you can tell if you hit by comparing the string length of `a` with the string length of `abc`. + +Finally, calculate the position of the bit where `1` is set and get the corresponding value, and you're done. + +Using this technique, field lookups are possible with only bitwise operations and access to slices. + +`go-json` uses a similar technique for fields with 9 or more and 16 or less fields. At this time, Bitmap is constructed as `[maxKeyLen][256]int16` type. + +Currently, this optimization is not performed when the maximum length of the field name is long (specifically, 64 bytes or more) in addition to the limitation of the number of fields from the viewpoint of saving memory usage. + +### Others + +I have done a lot of other optimizations. I will find time to write about them. If you have any questions about what's written here or other optimizations, please visit the `#go-json` channel on `gophers.slack.com` . + +## Reference + +Regarding the story of go-json, there are the following articles in Japanese only. + +- https://speakerdeck.com/goccy/zui-su-falsejsonraiburariwoqiu-mete +- https://engineering.mercari.com/blog/entry/1599563768-081104c850/ + +# Looking for Sponsors + +I'm looking for sponsors this library. This library is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy). I will cooperate as much as possible. Of course, this library is developed as an MIT license, so you can use it freely for free. 
+ +# License + +MIT diff --git a/vendor/github.com/goccy/go-json/color.go b/vendor/github.com/goccy/go-json/color.go new file mode 100644 index 0000000..e80b22b --- /dev/null +++ b/vendor/github.com/goccy/go-json/color.go @@ -0,0 +1,68 @@ +package json + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +type ( + ColorFormat = encoder.ColorFormat + ColorScheme = encoder.ColorScheme +) + +const escape = "\x1b" + +type colorAttr int + +//nolint:deadcode,varcheck +const ( + fgBlackColor colorAttr = iota + 30 + fgRedColor + fgGreenColor + fgYellowColor + fgBlueColor + fgMagentaColor + fgCyanColor + fgWhiteColor +) + +//nolint:deadcode,varcheck +const ( + fgHiBlackColor colorAttr = iota + 90 + fgHiRedColor + fgHiGreenColor + fgHiYellowColor + fgHiBlueColor + fgHiMagentaColor + fgHiCyanColor + fgHiWhiteColor +) + +func createColorFormat(attr colorAttr) ColorFormat { + return ColorFormat{ + Header: wrapColor(attr), + Footer: resetColor(), + } +} + +func wrapColor(attr colorAttr) string { + return fmt.Sprintf("%s[%dm", escape, attr) +} + +func resetColor() string { + return wrapColor(colorAttr(0)) +} + +var ( + DefaultColorScheme = &ColorScheme{ + Int: createColorFormat(fgHiMagentaColor), + Uint: createColorFormat(fgHiMagentaColor), + Float: createColorFormat(fgHiMagentaColor), + Bool: createColorFormat(fgHiYellowColor), + String: createColorFormat(fgHiGreenColor), + Binary: createColorFormat(fgHiRedColor), + ObjectKey: createColorFormat(fgHiCyanColor), + Null: createColorFormat(fgBlueColor), + } +) diff --git a/vendor/github.com/goccy/go-json/decode.go b/vendor/github.com/goccy/go-json/decode.go new file mode 100644 index 0000000..74c6ac3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/decode.go @@ -0,0 +1,263 @@ +package json + +import ( + "context" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/decoder" + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type Decoder struct { + s *decoder.Stream +} + +const ( + nul = '\000' +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + rctx := decoder.TakeRuntimeContext() + rctx.Buf = src + rctx.Option.Flags = 0 + rctx.Option.Flags |= decoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + cursor, err := dec.Decode(rctx, 0, 0, 
header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(rctx) + return err + } + decoder.ReleaseRuntimeContext(rctx) + return validateEndBuf(src, cursor) +} + +var ( + pathDecoder = decoder.NewPathDecoder() +) + +func extractFromPath(path *Path, data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) { + if path.path.RootSelectorOnly { + return [][]byte{data}, nil + } + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + ctx.Option.Flags |= decoder.PathOption + ctx.Option.Path = path.path + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + paths, cursor, err := pathDecoder.DecodePath(ctx, 0, 0) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return nil, err + } + decoder.ReleaseRuntimeContext(ctx) + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return paths, nil +} + +func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, noescape(header.ptr)) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +//nolint:staticcheck +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func validateType(typ *runtime.Type, p uintptr) error { + if typ == nil || typ.Kind() != reflect.Ptr || p == 0 { + return &InvalidUnmarshalError{Type: runtime.RType2Type(typ)} + } + return nil +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may +// read data from r beyond the JSON values requested. +func NewDecoder(r io.Reader) *Decoder { + s := decoder.NewStream(r) + return &Decoder{ + s: s, + } +} + +// Buffered returns a reader of the data remaining in the Decoder's +// buffer. The reader is valid until the next call to Decode. +func (d *Decoder) Buffered() io.Reader { + return d.s.Buffered() +} + +// Decode reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about +// the conversion of JSON into a Go value. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeWithOption(v) +} + +// DecodeContext reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v with context.Context. 
+func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { + d.s.Option.Flags |= decoder.ContextOption + d.s.Option.Context = ctx + return d.DecodeWithOption(v) +} + +func (d *Decoder) DecodeWithOption(v interface{}, optFuncs ...DecodeOptionFunc) error { + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + ptr := uintptr(header.ptr) + typeptr := uintptr(unsafe.Pointer(typ)) + // noescape trick for header.typ ( reflect.*rtype ) + copiedType := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + + if err := validateType(copiedType, ptr); err != nil { + return err + } + + dec, err := decoder.CompileToGetDecoder(typ) + if err != nil { + return err + } + if err := d.s.PrepareForDecode(); err != nil { + return err + } + s := d.s + for _, optFunc := range optFuncs { + optFunc(s.Option) + } + if err := dec.DecodeStream(s, 0, header.ptr); err != nil { + return err + } + s.Reset() + return nil +} + +func (d *Decoder) More() bool { + return d.s.More() +} + +func (d *Decoder) Token() (Token, error) { + return d.s.Token() +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields() { + d.s.DisallowUnknownFields = true +} + +func (d *Decoder) InputOffset() int64 { + return d.s.TotalOffset() +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (d *Decoder) UseNumber() { + d.s.UseNumber = true +} diff --git a/vendor/github.com/goccy/go-json/docker-compose.yml b/vendor/github.com/goccy/go-json/docker-compose.yml new file mode 100644 index 0000000..db40c79 --- /dev/null +++ b/vendor/github.com/goccy/go-json/docker-compose.yml @@ -0,0 +1,13 @@ +version: '2' +services: + go-json: + image: golang:1.18 + volumes: + - '.:/go/src/go-json' + deploy: + resources: + limits: + memory: 620M + working_dir: /go/src/go-json + command: | + sh -c "go test -c . && ls go-json.test" diff --git a/vendor/github.com/goccy/go-json/encode.go b/vendor/github.com/goccy/go-json/encode.go new file mode 100644 index 0000000..c517382 --- /dev/null +++ b/vendor/github.com/goccy/go-json/encode.go @@ -0,0 +1,326 @@ +package json + +import ( + "context" + "io" + "os" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/encoder/vm" + "github.com/goccy/go-json/internal/encoder/vm_color" + "github.com/goccy/go-json/internal/encoder/vm_color_indent" + "github.com/goccy/go-json/internal/encoder/vm_indent" +) + +// An Encoder writes JSON values to an output stream. +type Encoder struct { + w io.Writer + enabledIndent bool + enabledHTMLEscape bool + prefix string + indentStr string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, enabledHTMLEscape: true} +} + +// Encode writes the JSON encoding of v to the stream, followed by a newline character. +// +// See the documentation for Marshal for details about the conversion of Go values to JSON. +func (e *Encoder) Encode(v interface{}) error { + return e.EncodeWithOption(v) +} + +// EncodeWithOption call Encode with EncodeOption. +func (e *Encoder) EncodeWithOption(v interface{}, optFuncs ...EncodeOptionFunc) error { + ctx := encoder.TakeRuntimeContext() + ctx.Option.Flag = 0 + + err := e.encodeWithOption(ctx, v, optFuncs...) 
+ + encoder.ReleaseRuntimeContext(ctx) + return err +} + +// EncodeContext call Encode with context.Context and EncodeOption. +func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag |= encoder.ContextOption + rctx.Option.Context = ctx + + err := e.encodeWithOption(rctx, v, optFuncs...) //nolint: contextcheck + + encoder.ReleaseRuntimeContext(rctx) + return err +} + +func (e *Encoder) encodeWithOption(ctx *encoder.RuntimeContext, v interface{}, optFuncs ...EncodeOptionFunc) error { + if e.enabledHTMLEscape { + ctx.Option.Flag |= encoder.HTMLEscapeOption + } + ctx.Option.Flag |= encoder.NormalizeUTF8Option + ctx.Option.DebugOut = os.Stdout + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + var ( + buf []byte + err error + ) + if e.enabledIndent { + buf, err = encodeIndent(ctx, v, e.prefix, e.indentStr) + } else { + buf, err = encode(ctx, v) + } + if err != nil { + return err + } + if e.enabledIndent { + buf = buf[:len(buf)-2] + } else { + buf = buf[:len(buf)-1] + } + buf = append(buf, '\n') + if _, err := e.w.Write(buf); err != nil { + return err + } + return nil +} + +// SetEscapeHTML specifies whether problematic HTML characters should be escaped inside JSON quoted strings. +// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e to avoid certain safety problems that can arise when embedding JSON in HTML. +// +// In non-HTML settings where the escaping interferes with the readability of the output, SetEscapeHTML(false) disables this behavior. +func (e *Encoder) SetEscapeHTML(on bool) { + e.enabledHTMLEscape = on +} + +// SetIndent instructs the encoder to format each subsequent encoded value as if indented by the package-level function Indent(dst, src, prefix, indent). +// Calling SetIndent("", "") disables indentation. +func (e *Encoder) SetIndent(prefix, indent string) { + if prefix == "" && indent == "" { + e.enabledIndent = false + return + } + e.prefix = prefix + e.indentStr = indent + e.enabledIndent = true +} + +func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag = encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + + buf, err := encode(rctx, v) //nolint: contextcheck + if err != nil { + encoder.ReleaseRuntimeContext(rctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(rctx) + return copied, nil +} + +func marshal(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encode(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . 
+ // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalNoEscape(v interface{}) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + + buf, err := encodeNoEscape(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalIndent(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.IndentOption) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encodeIndent(ctx, v, prefix, indent) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + buf = buf[:len(buf)-2] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func encode(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) + + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + ctx.Buf = buf + return buf, nil +} + +func encodeNoEscape(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeIndent(ctx *encoder.RuntimeContext, v interface{}, prefix, indent string) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendCommaIndent(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunIndentCode(ctx, b, codeSet, prefix, indent) + + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) 
+ + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeRunCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.DebugRun(ctx, b, codeSet) + } + return vm.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.Run(ctx, b, codeSet) + } + return vm.Run(ctx, b, codeSet) +} + +func encodeRunIndentCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet, prefix, indent string) ([]byte, error) { + ctx.Prefix = []byte(prefix) + ctx.IndentStr = []byte(indent) + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.DebugRun(ctx, b, codeSet) + } + return vm_indent.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.Run(ctx, b, codeSet) + } + return vm_indent.Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/error.go b/vendor/github.com/goccy/go-json/error.go new file mode 100644 index 0000000..5b2dcee --- /dev/null +++ b/vendor/github.com/goccy/go-json/error.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/goccy/go-json/internal/errors" +) + +// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when +// attempting to encode a string value with invalid UTF-8 sequences. +// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by +// replacing invalid bytes with the Unicode replacement rune U+FFFD. +// +// Deprecated: No longer used; kept for compatibility. +type InvalidUTF8Error = errors.InvalidUTF8Error + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError = errors.InvalidUnmarshalError + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError = errors.MarshalerError + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError = errors.SyntaxError + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError = errors.UnmarshalFieldError + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError = errors.UnmarshalTypeError + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. 
+type UnsupportedTypeError = errors.UnsupportedTypeError + +type UnsupportedValueError = errors.UnsupportedValueError + +type PathError = errors.PathError diff --git a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go new file mode 100644 index 0000000..b6876cf --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go @@ -0,0 +1,41 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type anonymousFieldDecoder struct { + structType *runtime.Type + offset uintptr + dec Decoder +} + +func newAnonymousFieldDecoder(structType *runtime.Type, offset uintptr, dec Decoder) *anonymousFieldDecoder { + return &anonymousFieldDecoder{ + structType: structType, + offset: offset, + dec: dec, + } +} + +func (d *anonymousFieldDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return d.dec.DecodePath(ctx, cursor, depth) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go new file mode 100644 index 0000000..4b23ed4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go @@ -0,0 +1,176 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type arrayDecoder struct { + elemType *runtime.Type + size uintptr + valueDecoder Decoder + alen int + structName string + fieldName string + zeroValue unsafe.Pointer +} + +func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder { + // workaround to avoid checkptr errors. cannot use `*(*unsafe.Pointer)(unsafe_New(elemType))` directly. 
+ zeroValuePtr := unsafe_New(elemType) + zeroValue := **(**unsafe.Pointer)(unsafe.Pointer(&zeroValuePtr)) + return &arrayDecoder{ + valueDecoder: dec, + elemType: elemType, + size: elemType.Size(), + alen: alen, + structName: structName, + fieldName: fieldName, + zeroValue: zeroValue, + } +} + +func (d *arrayDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case '[': + idx := 0 + s.cursor++ + if s.skipWhiteSpace() == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + } + for { + if idx < d.alen { + if err := d.valueDecoder.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)); err != nil { + return err + } + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + idx++ + switch s.skipWhiteSpace() { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + case ',': + s.cursor++ + continue + case nul: + if s.read() { + s.cursor++ + continue + } + goto ERROR + default: + goto ERROR + } + } + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + s.cursor++ + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("array", s.totalOffset()) +} + +func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '[': + idx := 0 + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + } + for { + if idx < d.alen { + c, err := d.valueDecoder.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + idx++ + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + case ',': + cursor++ + continue + default: + return 0, errors.ErrInvalidCharacter(buf[cursor], "array", cursor) + } + } + default: + return 0, errors.ErrUnexpectedEndOfJSON("array", cursor) + } + } +} + +func (d *arrayDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: array decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/assign.go b/vendor/github.com/goccy/go-json/internal/decoder/assign.go new file mode 100644 index 0000000..c53e6ad --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/assign.go @@ -0,0 +1,438 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" +) + +var ( + nilValue 
= reflect.ValueOf(nil) +) + +func AssignValue(src, dst reflect.Value) error { + if dst.Type().Kind() != reflect.Ptr { + return fmt.Errorf("invalid dst type. required pointer type: %T", dst.Type()) + } + casted, err := castValue(dst.Elem().Type(), src) + if err != nil { + return err + } + dst.Elem().Set(casted) + return nil +} + +func castValue(t reflect.Type, v reflect.Value) (reflect.Value, error) { + switch t.Kind() { + case reflect.Int: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int(vv.Int())), nil + case reflect.Int8: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int8(vv.Int())), nil + case reflect.Int16: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int16(vv.Int())), nil + case reflect.Int32: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int32(vv.Int())), nil + case reflect.Int64: + return castInt(v) + case reflect.Uint: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint(vv.Uint())), nil + case reflect.Uint8: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint8(vv.Uint())), nil + case reflect.Uint16: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint16(vv.Uint())), nil + case reflect.Uint32: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint32(vv.Uint())), nil + case reflect.Uint64: + return castUint(v) + case reflect.Uintptr: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uintptr(vv.Uint())), nil + case reflect.String: + return castString(v) + case reflect.Bool: + return castBool(v) + case reflect.Float32: + vv, err := castFloat(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(float32(vv.Float())), nil + case reflect.Float64: + return castFloat(v) + case reflect.Array: + return castArray(t, v) + case reflect.Slice: + return castSlice(t, v) + case reflect.Map: + return castMap(t, v) + case reflect.Struct: + return castStruct(t, v) + } + return v, nil +} + +func castInt(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(int64(v.Uint())), nil + case reflect.String: + i64, err := strconv.ParseInt(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(i64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(int64(1)), nil + } + return reflect.ValueOf(int64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(int64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty slice") + case reflect.Interface: + return castInt(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to int64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to int64 from struct") + case reflect.Ptr: + return castInt(v.Elem()) + } + return 
nilValue, fmt.Errorf("failed to cast to int64 from %s", v.Type().Kind()) +} + +func castUint(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(uint64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v, nil + case reflect.String: + u64, err := strconv.ParseUint(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(u64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(uint64(1)), nil + } + return reflect.ValueOf(uint64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(uint64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty slice") + case reflect.Interface: + return castUint(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to uint64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to uint64 from struct") + case reflect.Ptr: + return castUint(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from %s", v.Type().Kind()) +} + +func castString(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(fmt.Sprint(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(fmt.Sprint(v.Uint())), nil + case reflect.String: + return v, nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf("true"), nil + } + return reflect.ValueOf("false"), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(fmt.Sprint(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castString(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castString(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to string from %s", v.Type().Kind()) +} + +func castBool(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Int() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Uint() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Uint()) + case reflect.String: + b, err := strconv.ParseBool(v.String()) + if err != nil { + return nilValue, err + } + 
return reflect.ValueOf(b), nil + case reflect.Bool: + return v, nil + case reflect.Float32, reflect.Float64: + switch v.Float() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %f", v.Float()) + case reflect.Array: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castBool(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castBool(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to bool from %s", v.Type().Kind()) +} + +func castFloat(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(float64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(float64(v.Uint())), nil + case reflect.String: + f64, err := strconv.ParseFloat(v.String(), 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(f64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(float64(1)), nil + } + return reflect.ValueOf(float64(0)), nil + case reflect.Float32, reflect.Float64: + return v, nil + case reflect.Array: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty slice") + case reflect.Interface: + return castFloat(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to float64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to float64 from struct") + case reflect.Ptr: + return castFloat(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to float64 from %s", v.Type().Kind()) +} + +func castArray(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castArray(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to array from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + if t.Len() != v.Len() { + return nilValue, fmt.Errorf("failed to cast [%d]array from slice of %d length", t.Len(), v.Len()) + } + ret := reflect.New(t).Elem() + for i := 0; i < v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castSlice(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castSlice(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to slice from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + ret := reflect.MakeSlice(t, v.Len(), v.Len()) + for i := 0; i < 
v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castMap(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.MakeMap(t) + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key, err := castValue(t.Key(), iter.Key()) + if err != nil { + return nilValue, err + } + value, err := castValue(t.Elem(), iter.Value()) + if err != nil { + return nilValue, err + } + ret.SetMapIndex(key, value) + } + return ret, nil + case reflect.Interface: + return castMap(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castMap(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to map from empty slice") + } + return nilValue, fmt.Errorf("failed to cast to map from %s", v.Type().Kind()) +} + +func castStruct(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.New(t).Elem() + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key := iter.Key() + k, err := castString(key) + if err != nil { + return nilValue, err + } + fieldName := k.String() + field, ok := t.FieldByName(fieldName) + if ok { + value, err := castValue(field.Type, iter.Value()) + if err != nil { + return nilValue, err + } + ret.FieldByName(fieldName).Set(value) + } + } + return ret, nil + case reflect.Struct: + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + ret.FieldByName(name).Set(v.FieldByName(name)) + } + return ret, nil + case reflect.Interface: + return castStruct(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castStruct(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to struct from empty slice") + default: + return nilValue, fmt.Errorf("failed to cast to struct from %s", v.Type().Kind()) + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bool.go b/vendor/github.com/goccy/go-json/internal/decoder/bool.go new file mode 100644 index 0000000..ba6cf5b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bool.go @@ -0,0 +1,83 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type boolDecoder struct { + structName string + fieldName string +} + +func newBoolDecoder(structName, fieldName string) *boolDecoder { + return &boolDecoder{structName: structName, fieldName: fieldName} +} + +func (d *boolDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case nul: + if s.read() { + c = s.char() + continue + } + goto ERROR + } + break + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("bool", s.totalOffset()) +} + +func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**bool)(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 
0, err + } + cursor += 5 + **(**bool)(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + } + return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor) +} + +func (d *boolDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: bool decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go new file mode 100644 index 0000000..939bf43 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go @@ -0,0 +1,118 @@ +package decoder + +import ( + "encoding/base64" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type bytesDecoder struct { + typ *runtime.Type + sliceDecoder Decoder + stringDecoder *stringDecoder + structName string + fieldName string +} + +func byteUnmarshalerSliceDecoder(typ *runtime.Type, structName string, fieldName string) Decoder { + var unmarshalDecoder Decoder + switch { + case runtime.PtrTo(typ).Implements(unmarshalJSONType): + unmarshalDecoder = newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName) + case runtime.PtrTo(typ).Implements(unmarshalTextType): + unmarshalDecoder = newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName) + default: + unmarshalDecoder, _ = compileUint8(typ, structName, fieldName) + } + return newSliceDecoder(unmarshalDecoder, typ, 1, structName, fieldName) +} + +func newBytesDecoder(typ *runtime.Type, structName string, fieldName string) *bytesDecoder { + return &bytesDecoder{ + typ: typ, + sliceDecoder: byteUnmarshalerSliceDecoder(typ, structName, fieldName), + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + } +} + +func (d *bytesDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamBinary(s, depth, p) + if err != nil { + return err + } + if bytes == nil { + s.reset() + return nil + } + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + buf := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(buf, bytes) + if err != nil { + return err + } + *(*[]byte)(p) = buf[:n] + s.reset() + return nil +} + +func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeBinary(ctx, cursor, depth, p) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + b := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(b, bytes) + if err != nil { + return 0, err + } + *(*[]byte)(p) = b[:n] + return cursor, nil +} + +func (d *bytesDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: []byte decoder does not support decode path") +} + +func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) { + c := s.skipWhiteSpace() + if c == '[' { + if d.sliceDecoder == nil { + return nil, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + err := d.sliceDecoder.DecodeStream(s, depth, p) + return nil, err + } + return d.stringDecoder.decodeStreamByte(s) +} + +func (d *bytesDecoder) decodeBinary(ctx *RuntimeContext, cursor, 
depth int64, p unsafe.Pointer) ([]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '[' { + if d.sliceDecoder == nil { + return nil, 0, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: cursor, + } + } + c, err := d.sliceDecoder.Decode(ctx, cursor, depth, p) + if err != nil { + return nil, 0, err + } + return nil, c, nil + } + return d.stringDecoder.decodeByte(buf, cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go new file mode 100644 index 0000000..fab6437 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -0,0 +1,487 @@ +package decoder + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "sync/atomic" + "unicode" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var ( + jsonNumberType = reflect.TypeOf(json.Number("")) + typeAddr *runtime.TypeAddr + cachedDecoderMap unsafe.Pointer // map[uintptr]decoder + cachedDecoder []Decoder +) + +func init() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +} + +func loadDecoderMap() map[uintptr]Decoder { + p := atomic.LoadPointer(&cachedDecoderMap) + return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) +} + +func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + newDecoderMap := make(map[uintptr]Decoder, len(m)+1) + newDecoderMap[typ] = dec + + for k, v := range m { + newDecoderMap[k] = v + } + + atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap))) +} + +func compileToGetDecoderSlowPath(typeptr uintptr, typ *runtime.Type) (Decoder, error) { + decoderMap := loadDecoderMap() + if dec, exists := decoderMap[typeptr]; exists { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + storeDecoder(typeptr, dec, decoderMap) + return dec, nil +} + +func compileHead(typ *runtime.Type, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), "", ""), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), "", ""), nil + } + return compile(typ.Elem(), "", "", structTypeToDecoder) +} + +func compile(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + + switch typ.Kind() { + case reflect.Ptr: + return compilePtr(typ, structName, fieldName, structTypeToDecoder) + case reflect.Struct: + return compileStruct(typ, structName, fieldName, structTypeToDecoder) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return compileBytes(elem, structName, fieldName) + } + return compileSlice(typ, structName, fieldName, structTypeToDecoder) + case reflect.Array: + return compileArray(typ, structName, fieldName, structTypeToDecoder) + case reflect.Map: + return compileMap(typ, structName, fieldName, structTypeToDecoder) + case reflect.Interface: + return 
compileInterface(typ, structName, fieldName) + case reflect.Uintptr: + return compileUint(typ, structName, fieldName) + case reflect.Int: + return compileInt(typ, structName, fieldName) + case reflect.Int8: + return compileInt8(typ, structName, fieldName) + case reflect.Int16: + return compileInt16(typ, structName, fieldName) + case reflect.Int32: + return compileInt32(typ, structName, fieldName) + case reflect.Int64: + return compileInt64(typ, structName, fieldName) + case reflect.Uint: + return compileUint(typ, structName, fieldName) + case reflect.Uint8: + return compileUint8(typ, structName, fieldName) + case reflect.Uint16: + return compileUint16(typ, structName, fieldName) + case reflect.Uint32: + return compileUint32(typ, structName, fieldName) + case reflect.Uint64: + return compileUint64(typ, structName, fieldName) + case reflect.String: + return compileString(typ, structName, fieldName) + case reflect.Bool: + return compileBool(structName, fieldName) + case reflect.Float32: + return compileFloat32(structName, fieldName) + case reflect.Float64: + return compileFloat64(structName, fieldName) + case reflect.Func: + return compileFunc(typ, structName, fieldName) + } + return newInvalidDecoder(typ, structName, fieldName), nil +} + +func isStringTagSupportedType(typ *runtime.Type) bool { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return false + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return false + } + switch typ.Kind() { + case reflect.Map: + return false + case reflect.Slice: + return false + case reflect.Array: + return false + case reflect.Struct: + return false + case reflect.Interface: + return false + } + return true +} + +func compileMapKey(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + if runtime.PtrTo(typ).Implements(unmarshalTextType) { + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + if typ.Kind() == reflect.String { + return newStringDecoder(structName, fieldName), nil + } + dec, err := compile(typ, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + for { + switch t := dec.(type) { + case *stringDecoder, *interfaceDecoder: + return dec, nil + case *boolDecoder, *intDecoder, *uintDecoder, *numberDecoder: + return newWrappedStringDecoder(typ, dec, structName, fieldName), nil + case *ptrDecoder: + dec = t.dec + default: + return newInvalidDecoder(typ, structName, fieldName), nil + } + } +} + +func compilePtr(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + dec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newPtrDecoder(dec, typ.Elem(), structName, fieldName), nil +} + +func compileInt(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int)(p) = int(v) + }), nil +} + +func compileInt8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int8)(p) = int8(v) + }), nil +} + +func compileInt16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int16)(p) = int16(v) + }), nil +} + +func compileInt32(typ *runtime.Type, structName, fieldName string) (Decoder, 
error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int32)(p) = int32(v) + }), nil +} + +func compileInt64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int64)(p) = v + }), nil +} + +func compileUint(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint)(p) = uint(v) + }), nil +} + +func compileUint8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint8)(p) = uint8(v) + }), nil +} + +func compileUint16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint16)(p) = uint16(v) + }), nil +} + +func compileUint32(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint32)(p) = uint32(v) + }), nil +} + +func compileUint64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint64)(p) = v + }), nil +} + +func compileFloat32(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float32)(p) = float32(v) + }), nil +} + +func compileFloat64(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float64)(p) = v + }), nil +} + +func compileString(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + if typ == runtime.Type2RType(jsonNumberType) { + return newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*json.Number)(p) = v + }), nil + } + return newStringDecoder(structName, fieldName), nil +} + +func compileBool(structName, fieldName string) (Decoder, error) { + return newBoolDecoder(structName, fieldName), nil +} + +func compileBytes(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newBytesDecoder(typ, structName, fieldName), nil +} + +func compileSlice(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newSliceDecoder(decoder, elem, elem.Size(), structName, fieldName), nil +} + +func compileArray(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newArrayDecoder(decoder, elem, typ.Len(), structName, fieldName), nil +} + +func compileMap(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + keyDec, err := compileMapKey(typ.Key(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + valueDec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newMapDecoder(typ, typ.Key(), keyDec, typ.Elem(), valueDec, 
structName, fieldName), nil +} + +func compileInterface(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newInterfaceDecoder(typ, structName, fieldName), nil +} + +func compileFunc(typ *runtime.Type, strutName, fieldName string) (Decoder, error) { + return newFuncDecoder(typ, strutName, fieldName), nil +} + +func typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + fieldNum := typ.NumField() + fieldMap := map[string]*structFieldSet{} + typeptr := uintptr(unsafe.Pointer(typ)) + if dec, exists := structTypeToDecoder[typeptr]; exists { + return dec, nil + } + structDec := newStructDecoder(structName, fieldName, fieldMap) + structTypeToDecoder[typeptr] = structDec + structName = typ.Name() + tags := typeToStructTags(typ) + allFields := []*structFieldSet{} + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + isUnexportedField := unicode.IsLower([]rune(field.Name)[0]) + tag := runtime.StructTagFromField(field) + dec, err := compile(runtime.Type2RType(field.Type), structName, field.Name, structTypeToDecoder) + if err != nil { + return nil, err + } + if field.Anonymous && !tag.IsTaggedKey { + if stDec, ok := dec.(*structDecoder); ok { + if runtime.Type2RType(field.Type) == typ { + // recursive definition + continue + } + for k, v := range stDec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: v.dec, + offset: field.Offset + v.offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + } + allFields = append(allFields, fieldSet) + } + } else if pdec, ok := dec.(*ptrDecoder); ok { + contentDec := pdec.contentDecoder() + if pdec.typ == typ { + // recursive definition + continue + } + var fieldSetErr error + if isUnexportedField { + fieldSetErr = fmt.Errorf( + "json: cannot set embedded pointer to unexported struct: %v", + field.Type.Elem(), + ) + } + if dec, ok := contentDec.(*structDecoder); ok { + for k, v := range dec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec), + offset: field.Offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + err: fieldSetErr, + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: pdec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + if tag.IsString && isStringTagSupportedType(runtime.Type2RType(field.Type)) { + dec = newWrappedStringDecoder(runtime.Type2RType(field.Type), dec, structName, field.Name) + } + var key string + if tag.Key != "" { + key = tag.Key + } else { + key = field.Name + } + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: key, + keyLen: int64(len(key)), + } + allFields = 
append(allFields, fieldSet) + } + } + for _, set := range filterDuplicatedFields(allFields) { + fieldMap[set.key] = set + lower := strings.ToLower(set.key) + if _, exists := fieldMap[lower]; !exists { + // first win + fieldMap[lower] = set + } + } + delete(structTypeToDecoder, typeptr) + structDec.tryOptimize() + return structDec, nil +} + +func filterDuplicatedFields(allFields []*structFieldSet) []*structFieldSet { + fieldMap := map[string][]*structFieldSet{} + for _, field := range allFields { + fieldMap[field.key] = append(fieldMap[field.key], field) + } + duplicatedFieldMap := map[string]struct{}{} + for k, sets := range fieldMap { + sets = filterFieldSets(sets) + if len(sets) != 1 { + duplicatedFieldMap[k] = struct{}{} + } + } + + filtered := make([]*structFieldSet, 0, len(allFields)) + for _, field := range allFields { + if _, exists := duplicatedFieldMap[field.key]; exists { + continue + } + filtered = append(filtered, field) + } + return filtered +} + +func filterFieldSets(sets []*structFieldSet) []*structFieldSet { + if len(sets) == 1 { + return sets + } + filtered := make([]*structFieldSet, 0, len(sets)) + for _, set := range sets { + if set.isTaggedKey { + filtered = append(filtered, set) + } + } + return filtered +} + +func implementsUnmarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(unmarshalJSONType) || typ.Implements(unmarshalJSONContextType) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go new file mode 100644 index 0000000..eb7e2b1 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -0,0 +1,29 @@ +//go:build !race +// +build !race + +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if dec := cachedDecoder[index]; dec != nil { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + cachedDecoder[index] = dec + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go new file mode 100644 index 0000000..49cdda4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -0,0 +1,37 @@ +//go:build race +// +build race + +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var decMu sync.RWMutex + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + decMu.RLock() + if dec := cachedDecoder[index]; dec != nil { + decMu.RUnlock() + return dec, nil + } + decMu.RUnlock() + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + decMu.Lock() + cachedDecoder[index] = dec + decMu.Unlock() + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/context.go b/vendor/github.com/goccy/go-json/internal/decoder/context.go new file mode 100644 index 0000000..cb2ffda --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/decoder/context.go @@ -0,0 +1,254 @@ +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type RuntimeContext struct { + Buf []byte + Option *Option +} + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Option: &Option{}, + } + }, + } +) + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} + +var ( + isWhiteSpace = [256]bool{} +) + +func init() { + isWhiteSpace[' '] = true + isWhiteSpace['\n'] = true + isWhiteSpace['\t'] = true + isWhiteSpace['\r'] = true +} + +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { + for isWhiteSpace[buf[cursor]] { + cursor++ + } + return cursor +} + +func skipObject(buf []byte, cursor, depth int64) (int64, error) { + braceCount := 1 + for { + switch buf[cursor] { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + braceCount-- + if braceCount == 0 { + return cursor + 1, nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipArray(buf []byte, cursor, depth int64) (int64, error) { + bracketCount := 1 + for { + switch buf[cursor] { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + return cursor + 1, nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipValue(buf []byte, cursor, depth int64) (int64, error) { + for { + switch buf[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return skipObject(buf, cursor+1, depth+1) + case '[': + return skipArray(buf, cursor+1, depth+1) + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + return cursor + 1, nil + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + if floatTable[buf[cursor]] { + continue + } + break + } + return cursor, nil + case 't': + if err := 
validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + default: + return cursor, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + } +} + +func validateTrue(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if buf[cursor+1] != 'r' { + return errors.ErrInvalidCharacter(buf[cursor+1], "true", cursor) + } + if buf[cursor+2] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+2], "true", cursor) + } + if buf[cursor+3] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+3], "true", cursor) + } + return nil +} + +func validateFalse(buf []byte, cursor int64) error { + if cursor+4 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if buf[cursor+1] != 'a' { + return errors.ErrInvalidCharacter(buf[cursor+1], "false", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "false", cursor) + } + if buf[cursor+3] != 's' { + return errors.ErrInvalidCharacter(buf[cursor+3], "false", cursor) + } + if buf[cursor+4] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+4], "false", cursor) + } + return nil +} + +func validateNull(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if buf[cursor+1] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+1], "null", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "null", cursor) + } + if buf[cursor+3] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+3], "null", cursor) + } + return nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/float.go b/vendor/github.com/goccy/go-json/internal/decoder/float.go new file mode 100644 index 0000000..9b2eb8b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/float.go @@ -0,0 +1,170 @@ +package decoder + +import ( + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type floatDecoder struct { + op func(unsafe.Pointer, float64) + structName string + fieldName string +} + +func newFloatDecoder(structName, fieldName string, op func(unsafe.Pointer, float64)) *floatDecoder { + return &floatDecoder{op: op, structName: structName, fieldName: fieldName} +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } + + validEndNumberChar = [256]bool{ + nul: true, + ' ': true, + '\t': true, + '\r': true, + '\n': true, + ',': true, + ':': true, + '}': true, + ']': true, + } +) + +func floatBytes(s *Stream) []byte { + start := s.cursor + for { + s.cursor++ + if floatTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + return s.buf[start:s.cursor] +} + +func (d *floatDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, 
err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("float", s.totalOffset()) +} + +func (d *floatDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + } +} + +func (d *floatDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + str := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, f64) + return nil +} + +func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + if !validEndNumberChar[buf[cursor]] { + return 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + s := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, errors.ErrSyntax(err.Error(), cursor) + } + d.op(p, f64) + return cursor, nil +} + +func (d *floatDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/func.go b/vendor/github.com/goccy/go-json/internal/decoder/func.go new file mode 100644 index 0000000..4cc12ca --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/func.go @@ -0,0 +1,146 @@ +package decoder + +import ( + "bytes" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type funcDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newFuncDecoder(typ *runtime.Type, structName, fieldName string) *funcDecoder { + fnDecoder := &funcDecoder{typ, structName, fieldName} + return fnDecoder +} + +func (d *funcDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '"': + return &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + 
} + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + case 't': + if err := trueBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + case 'f': + if err := falseBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + } + } + return errors.ErrInvalidBeginningOfValue(s.buf[s.cursor], s.totalOffset()) +} + +func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '"': + return 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + case 't': + if err := validateTrue(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + case 'f': + if err := validateFalse(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + } + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} + +func (d *funcDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: func decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/int.go b/vendor/github.com/goccy/go-json/internal/decoder/int.go new file mode 100644 index 0000000..1a7f081 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/int.go @@ -0,0 +1,246 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type intDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, int64) + structName string + fieldName string +} + +func newIntDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, int64)) *intDecoder { + return &intDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *intDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +var ( + pow10i64 = [...]int64{ + 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, + } + pow10i64Len = len(pow10i64) +) + +func (d *intDecoder) parseInt(b []byte) (int64, error) { + isNegative := false + if b[0] == '-' { + b 
= b[1:] + isNegative = true + } + maxDigit := len(b) + if maxDigit > pow10i64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := int64(0) + for i := 0; i < maxDigit; i++ { + c := int64(b[i]) - 48 + digitValue := pow10i64[maxDigit-i-1] + sum += c * digitValue + } + if isNegative { + return -1 * sum, nil + } + return sum, nil +} + +var ( + numTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + } +) + +var ( + numZeroBuf = []byte{'0'} +) + +func (d *intDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + if len(num) < 2 { + goto ERROR + } + return num, nil + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("number(integer)", s.totalOffset()) +} + +func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[char(b, cursor)] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{char(b, cursor)}, cursor) + } + } +} + +func (d *intDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + i64, err := d.parseInt(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, i64) + s.reset() + return nil +} + +func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + + i64, err := d.parseInt(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int16: + if i64 < 
-1*(1<<15) || (1<<15) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, i64) + return cursor, nil +} + +func (d *intDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: int decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/interface.go b/vendor/github.com/goccy/go-json/internal/decoder/interface.go new file mode 100644 index 0000000..45c69ab --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/interface.go @@ -0,0 +1,528 @@ +package decoder + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type interfaceDecoder struct { + typ *runtime.Type + structName string + fieldName string + sliceDecoder *sliceDecoder + mapDecoder *mapDecoder + floatDecoder *floatDecoder + numberDecoder *numberDecoder + stringDecoder *stringDecoder +} + +func newEmptyInterfaceDecoder(structName, fieldName string) *interfaceDecoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: structName, + fieldName: fieldName, + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder(structName, fieldName), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + structName, + fieldName, + ) + return ifaceDecoder +} + +func newInterfaceDecoder(typ *runtime.Type, structName, fieldName string) *interfaceDecoder { + emptyIfaceDecoder := newEmptyInterfaceDecoder(structName, fieldName) + stringDecoder := newStringDecoder(structName, fieldName) + return &interfaceDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + sliceDecoder: newSliceDecoder( + emptyIfaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ), + mapDecoder: newMapDecoder( + interfaceMapType, + stringType, + stringDecoder, + interfaceMapType.Elem(), + emptyIfaceDecoder, + structName, + fieldName, + ), + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: stringDecoder, + } +} + +func (d *interfaceDecoder) numDecoder(s *Stream) Decoder { + if s.UseNumber { + return d.numberDecoder + } + return d.floatDecoder +} + +var ( + emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem()) + EmptyInterfaceType = emptyInterfaceType + interfaceMapType = runtime.Type2RType( + reflect.TypeOf((*map[string]interface{})(nil)).Elem(), + ) + stringType = runtime.Type2RType( + reflect.TypeOf(""), + ) +) + +func decodeStreamUnmarshaler(s *Stream, depth int64, unmarshaler json.Unmarshaler) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, 
len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return err + } + return nil +} + +func decodeStreamUnmarshalerContext(s *Stream, depth int64, unmarshaler unmarshalerContext) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(s.Option.Context, dst); err != nil { + return err + } + return nil +} + +func decodeUnmarshaler(buf []byte, cursor, depth int64, unmarshaler json.Unmarshaler) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeUnmarshalerContext(ctx *RuntimeContext, buf []byte, cursor, depth int64, unmarshaler unmarshalerContext) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(ctx.Option.Context, dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeStreamTextUnmarshaler(s *Stream, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalText(dst); err != nil { + return err + } + return nil +} + +func decodeTextUnmarshaler(buf []byte, cursor, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + if s, ok := unquoteBytes(src); ok { + src = s + } + if err := unmarshaler.UnmarshalText(src); err != nil { + return 0, err + } + return end, nil +} + +func (d *interfaceDecoder) decodeStreamEmptyInterface(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + if err := d.mapDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + if err := d.sliceDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.numDecoder(s).DecodeStream(s, depth, p) + case '"': + s.cursor++ + start := s.cursor + for { + switch s.char() { + case '\\': + if _, err := decodeEscapeString(s, nil); err != nil { + return err + } + case '"': + literal := s.buf[start:s.cursor] + s.cursor++ + *(*interface{})(p) = string(literal) + return nil + case nul: + if s.read() { + continue + } + return errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.cursor++ + } + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = true + return nil + 
case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + case nul: + if s.read() { + c = s.char() + continue + } + } + break + } + return errors.ErrInvalidBeginningOfValue(c, s.totalOffset()) +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func (d *interfaceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeStreamUnmarshalerContext(s, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeStreamUnmarshaler(s, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeStreamTextUnmarshaler(s, depth, u, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + return d.errUnmarshalType(rv.Type(), s.totalOffset()) + } + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeStreamEmptyInterface(s, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeStreamEmptyInterface(s, depth, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + decoder, err := CompileToGetDecoder(typ) + if err != nil { + return err + } + return decoder.DecodeStream(s, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) errUnmarshalType(typ reflect.Type, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typ.String(), + Type: typ, + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *interfaceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeUnmarshalerContext(ctx, buf, cursor, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeUnmarshaler(buf, cursor, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeTextUnmarshaler(buf, cursor, depth, u, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return 0, d.errUnmarshalType(rv.Type(), cursor) + } + + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeEmptyInterface(ctx, 
cursor, depth, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + decoder, err := CompileToGetDecoder(typ) + if err != nil { + return 0, err + } + return decoder.Decode(ctx, cursor, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.mapDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.sliceDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.Decode(ctx, cursor, depth, p) + case '"': + var v string + ptr := unsafe.Pointer(&v) + cursor, err := d.stringDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + **(**interface{})(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} + +func NewPathDecoder() Decoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: "", + fieldName: "", + floatDecoder: newFloatDecoder("", "", func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder("", "", func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder("", ""), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + "", "", + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + "", "", + ) + return ifaceDecoder +} + +var ( + truebytes = []byte("true") + falsebytes = []byte("false") +) + +func (d *interfaceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + return d.mapDecoder.DecodePath(ctx, cursor, depth) + case '[': + return d.sliceDecoder.DecodePath(ctx, cursor, depth) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.DecodePath(ctx, cursor, depth) + case '"': + return d.stringDecoder.DecodePath(ctx, cursor, depth) + case 't': + if err := validateTrue(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{truebytes}, cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 5 + return [][]byte{falsebytes}, 
cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + } + return nil, cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go new file mode 100644 index 0000000..4c9721b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go @@ -0,0 +1,55 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type invalidDecoder struct { + typ *runtime.Type + kind reflect.Kind + structName string + fieldName string +} + +func newInvalidDecoder(typ *runtime.Type, structName, fieldName string) *invalidDecoder { + return &invalidDecoder{ + typ: typ, + kind: typ.Kind(), + structName: structName, + fieldName: fieldName, + } +} + +func (d *invalidDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go new file mode 100644 index 0000000..07a9cae --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go @@ -0,0 +1,280 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type mapDecoder struct { + mapType *runtime.Type + keyType *runtime.Type + valueType *runtime.Type + canUseAssignFaststrType bool + keyDecoder Decoder + valueDecoder Decoder + structName string + fieldName string +} + +func newMapDecoder(mapType *runtime.Type, keyType *runtime.Type, keyDec Decoder, valueType *runtime.Type, valueDec Decoder, structName, fieldName string) *mapDecoder { + return &mapDecoder{ + mapType: mapType, + keyDecoder: keyDec, + keyType: keyType, + canUseAssignFaststrType: canUseAssignFaststrType(keyType, valueType), + valueType: valueType, + valueDecoder: valueDec, + structName: structName, + fieldName: fieldName, + } +} + +const ( + mapMaxElemSize = 128 +) + +// See detail: https://github.com/goccy/go-json/pull/283 +func canUseAssignFaststrType(key *runtime.Type, value *runtime.Type) bool { + indirectElem := value.Size() > mapMaxElemSize + if indirectElem { + return false + } + return key.Kind() == reflect.String +} + +//go:linkname makemap reflect.makemap +func makemap(*runtime.Type, int) unsafe.Pointer + +//nolint:golint +//go:linkname mapassign_faststr runtime.mapassign_faststr +//go:noescape +func mapassign_faststr(t *runtime.Type, m unsafe.Pointer, s string) unsafe.Pointer + +//go:linkname mapassign reflect.mapassign +//go:noescape +func mapassign(t *runtime.Type, m unsafe.Pointer, k, v 
unsafe.Pointer) + +func (d *mapDecoder) mapassign(t *runtime.Type, m, k, v unsafe.Pointer) { + if d.canUseAssignFaststrType { + mapV := mapassign_faststr(t, m, *(*string)(k)) + typedmemmove(d.valueType, mapV, v) + } else { + mapassign(t, m, k, v) + } +} + +func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + switch s.skipWhiteSpace() { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return nil + case '{': + default: + return errors.ErrExpected("{ character for map value", s.totalOffset()) + } + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + *(*unsafe.Pointer)(p) = mapValue + s.cursor++ + return nil + } + for { + k := unsafe_New(d.keyType) + if err := d.keyDecoder.DecodeStream(s, depth, k); err != nil { + return err + } + s.skipWhiteSpace() + if !s.equalChar(':') { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + v := unsafe_New(d.valueType) + if err := d.valueDecoder.DecodeStream(s, depth, v); err != nil { + return err + } + d.mapassign(d.mapType, mapValue, k, v) + s.skipWhiteSpace() + if s.equalChar('}') { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + s.cursor++ + return nil + } + if !s.equalChar(',') { + return errors.ErrExpected("comma after object value", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return cursor, nil + case '{': + default: + return 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + for { + k := unsafe_New(d.keyType) + keyCursor, err := d.keyDecoder.Decode(ctx, cursor, depth, k) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + v := unsafe_New(d.valueType) + valueCursor, err := d.valueDecoder.Decode(ctx, cursor, depth, v) + if err != nil { + return 0, err + } + d.mapassign(d.mapType, mapValue, k, v) + cursor = skipWhiteSpace(buf, valueCursor) + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + if buf[cursor] != ',' { + return 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func (d *mapDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) 
+ if buflen < 2 { + return nil, 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '{': + default: + return nil, 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return nil, cursor, nil + } + keyDecoder, ok := d.keyDecoder.(*stringDecoder) + if !ok { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: reflect.TypeOf(""), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } + } + ret := [][]byte{} + for { + key, keyCursor, err := keyDecoder.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + child, found, err := ctx.Option.Path.Field(string(key)) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) + cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return ret, cursor, nil + } + if buf[cursor] != ',' { + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/number.go b/vendor/github.com/goccy/go-json/internal/decoder/number.go new file mode 100644 index 0000000..10e5435 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/number.go @@ -0,0 +1,123 @@ +package decoder + +import ( + "encoding/json" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type numberDecoder struct { + stringDecoder *stringDecoder + op func(unsafe.Pointer, json.Number) + structName string + fieldName string +} + +func newNumberDecoder(structName, fieldName string, op func(unsafe.Pointer, json.Number)) *numberDecoder { + return &numberDecoder{ + stringDecoder: newStringDecoder(structName, fieldName), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *numberDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, json.Number(string(bytes))) + s.reset() + return nil +} + +func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return 0, errors.ErrSyntax(err.Error(), c) + } + cursor = c + s := *(*string)(unsafe.Pointer(&bytes)) + d.op(p, json.Number(s)) + return cursor, nil +} + +func (d *numberDecoder) DecodePath(ctx *RuntimeContext, cursor, depth 
int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + +func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + start := s.cursor + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case '"': + return d.stringDecoder.decodeStreamByte(s) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + if s.cursor == start { + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + return nil, errors.ErrUnexpectedEndOfJSON("json.Number", s.totalOffset()) +} + +func (d *numberDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + case '"': + return d.stringDecoder.decodeByte(buf, cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("json.Number", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/option.go b/vendor/github.com/goccy/go-json/internal/decoder/option.go new file mode 100644 index 0000000..502f772 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/option.go @@ -0,0 +1,17 @@ +package decoder + +import "context" + +type OptionFlags uint8 + +const ( + FirstWinOption OptionFlags = 1 << iota + ContextOption + PathOption +) + +type Option struct { + Flags OptionFlags + Context context.Context + Path *Path +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/path.go b/vendor/github.com/goccy/go-json/internal/decoder/path.go new file mode 100644 index 0000000..a15ff69 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/path.go @@ -0,0 +1,670 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type PathString string + +func (s PathString) Build() (*Path, error) { + builder := new(PathBuilder) + return builder.Build([]rune(s)) +} + +type PathBuilder struct { + root PathNode + node PathNode + singleQuotePathSelector bool + doubleQuotePathSelector bool +} + +func (b *PathBuilder) Build(buf []rune) (*Path, error) { + node, err := b.build(buf) + if err != nil { + return nil, err + } + return &Path{ + node: node, + RootSelectorOnly: node == nil, + SingleQuotePathSelector: b.singleQuotePathSelector, + DoubleQuotePathSelector: b.doubleQuotePathSelector, + }, nil +} + +func (b *PathBuilder) build(buf []rune) (PathNode, error) { + if len(buf) == 0 { + return nil, errors.ErrEmptyPath() + } + if buf[0] != '$' { + return nil, errors.ErrInvalidPath("JSON Path must start with a $ character") + } + if len(buf) == 1 { + return nil, nil + } + buf = buf[1:] + offset, err := b.buildNext(buf) + if err != nil { + return nil, err + } + if len(buf) > offset { + return nil, errors.ErrInvalidPath("remain invalid path %q", buf[offset:]) + } + return b.root, 
nil +} + +func (b *PathBuilder) buildNextCharIfExists(buf []rune, cursor int) (int, error) { + if len(buf) > cursor { + offset, err := b.buildNext(buf[cursor:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + return cursor, nil +} + +func (b *PathBuilder) buildNext(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + offset, err := b.buildSelector(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + case '[': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + offset, err := b.buildIndex(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + default: + return 0, errors.ErrInvalidPath("expect dot or left bracket character. but found %c character", buf[0]) + } +} + +func (b *PathBuilder) buildSelector(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with double dot character") + } + offset, err := b.buildPathRecursive(buf[1:]) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '"': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with double quote character") + } + offset, err := b.buildQuoteSelector(buf[cursor+1:], DoubleQuotePathSelector) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addSelectorNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildQuoteSelector(buf []rune, sel QuotePathSelector) (int, error) { + switch buf[0] { + case '[', ']', '$', '.', '*', '\'', '"': + return 0, errors.ErrInvalidPath("found invalid path character %c after quote", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '\'': + if sel != SingleQuotePathSelector { + return 0, errors.ErrInvalidPath("found double quote character in field selector with single quote context") + } + if len(buf) <= cursor+1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character in field selector context") + } + if buf[cursor+1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket for field selector with single quote but found %c", buf[cursor+1]) + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.singleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+2) + case '"': + if sel != DoubleQuotePathSelector { + return 0, errors.ErrInvalidPath("found single quote character in field selector with double quote context") + } + 
selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.doubleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find quote character in selector quote path context") +} + +func (b *PathBuilder) buildPathRecursive(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after double dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addRecursiveNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildIndex(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$': + return 0, errors.ErrInvalidPath("found invalid path character %c after left bracket", buf[0]) + case '\'': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character") + } + offset, err := b.buildQuoteSelector(buf[1:], SingleQuotePathSelector) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '*': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with star character") + } + if buf[1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket character for index all path but found %c character", buf[1]) + } + b.addIndexAllNode() + offset := len("*]") + if len(buf) > 2 { + buildOffset, err := b.buildNext(buf[2:]) + if err != nil { + return 0, err + } + return offset + buildOffset, nil + } + return offset, nil + } + + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case ']': + index, err := strconv.ParseInt(string(buf[:cursor]), 10, 64) + if err != nil { + return 0, errors.ErrInvalidPath("%q is unexpected index path", buf[:cursor]) + } + b.addIndexNode(int(index)) + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find right bracket character in index path context") +} + +func (b *PathBuilder) addIndexAllNode() { + node := newPathIndexAllNode() + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addRecursiveNode(selector string) { + node := newPathRecursiveNode(selector) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addSelectorNode(name string) { + node := newPathSelectorNode(name) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addIndexNode(idx int) { + node := newPathIndexNode(idx) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +type QuotePathSelector int + +const ( + SingleQuotePathSelector QuotePathSelector = 1 + 
DoubleQuotePathSelector QuotePathSelector = 2 +) + +type Path struct { + node PathNode + RootSelectorOnly bool + SingleQuotePathSelector bool + DoubleQuotePathSelector bool +} + +func (p *Path) Field(sel string) (PathNode, bool, error) { + if p.node == nil { + return nil, false, nil + } + return p.node.Field(sel) +} + +func (p *Path) Get(src, dst reflect.Value) error { + if p.node == nil { + return nil + } + return p.node.Get(src, dst) +} + +func (p *Path) String() string { + if p.node == nil { + return "$" + } + return p.node.String() +} + +type PathNode interface { + fmt.Stringer + Index(idx int) (PathNode, bool, error) + Field(fieldName string) (PathNode, bool, error) + Get(src, dst reflect.Value) error + chain(PathNode) PathNode + target() bool + single() bool +} + +type BasePathNode struct { + child PathNode +} + +func (n *BasePathNode) chain(node PathNode) PathNode { + n.child = node + return node +} + +func (n *BasePathNode) target() bool { + return n.child == nil +} + +func (n *BasePathNode) single() bool { + return true +} + +type PathSelectorNode struct { + *BasePathNode + selector string +} + +func newPathSelectorNode(selector string) *PathSelectorNode { + return &PathSelectorNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathSelectorNode) Index(idx int) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathSelectorNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathSelectorNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(iter.Value(), dst) + } + return AssignValue(iter.Value(), dst) + } + } + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(src.Field(i), dst) + } + return AssignValue(src.Field(i), dst) + } + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + case reflect.Float64, reflect.String, reflect.Bool: + return AssignValue(src, dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathSelectorNode) String() string { + s := fmt.Sprintf(".%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexNode struct { + *BasePathNode + selector int +} + +func newPathIndexNode(selector int) *PathIndexNode { + return &PathIndexNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathIndexNode) Index(idx int) (PathNode, bool, error) { + if n.selector == idx { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathIndexNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + if src.Len() > n.selector { + if n.child != nil { + return n.child.Get(src.Index(n.selector), dst) + } + return 
AssignValue(src.Index(n.selector), dst) + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get [%d] value from %s", n.selector, src.Type()) +} + +func (n *PathIndexNode) String() string { + s := fmt.Sprintf("[%d]", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexAllNode struct { + *BasePathNode +} + +func newPathIndexAllNode() *PathIndexAllNode { + return &PathIndexAllNode{ + BasePathNode: &BasePathNode{}, + } +} + +func (n *PathIndexAllNode) Index(idx int) (PathNode, bool, error) { + return n.child, true, nil +} + +func (n *PathIndexAllNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexAllNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + var arr []interface{} + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + if n.child != nil { + if err := n.child.Get(src.Index(i), rv); err != nil { + return err + } + } else { + if err := AssignValue(src.Index(i), rv); err != nil { + return err + } + } + arr = append(arr, v) + } + if err := AssignValue(reflect.ValueOf(arr), dst); err != nil { + return err + } + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get all value from %s", src.Type()) +} + +func (n *PathIndexAllNode) String() string { + s := "[*]" + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathRecursiveNode struct { + *BasePathNode + selector string +} + +func newPathRecursiveNode(selector string) *PathRecursiveNode { + node := newPathSelectorNode(selector) + return &PathRecursiveNode{ + BasePathNode: &BasePathNode{ + child: node, + }, + selector: selector, + } +} + +func (n *PathRecursiveNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathRecursiveNode) Index(_ int) (PathNode, bool, error) { + return n, true, nil +} + +func valueToSliceValue(v interface{}) []interface{} { + rv := reflect.ValueOf(v) + ret := []interface{}{} + if rv.Type().Kind() == reflect.Slice || rv.Type().Kind() == reflect.Array { + for i := 0; i < rv.Len(); i++ { + ret = append(ret, rv.Index(i).Interface()) + } + return ret + } + return []interface{}{v} +} + +func (n *PathRecursiveNode) Get(src, dst reflect.Value) error { + if n.child == nil { + return fmt.Errorf("failed to get by recursive path ..%s", n.selector) + } + var arr []interface{} + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(iter.Value(), rv) + arr = append(arr, valueToSliceValue(v)...) + } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(iter.Value(), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) 
+ } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(src.Field(i), rv) + arr = append(arr, valueToSliceValue(v)...) + } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Field(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Array, reflect.Slice: + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Index(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathRecursiveNode) String() string { + s := fmt.Sprintf("..%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go new file mode 100644 index 0000000..ae22994 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go @@ -0,0 +1,97 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type ptrDecoder struct { + dec Decoder + typ *runtime.Type + structName string + fieldName string +} + +func newPtrDecoder(dec Decoder, typ *runtime.Type, structName, fieldName string) *ptrDecoder { + return &ptrDecoder{ + dec: dec, + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *ptrDecoder) contentDecoder() Decoder { + dec, ok := d.dec.(*ptrDecoder) + if !ok { + return d.dec + } + return dec.contentDecoder() +} + +//nolint:golint +//go:linkname unsafe_New reflect.unsafe_New +func unsafe_New(*runtime.Type) unsafe.Pointer + +func UnsafeNew(t *runtime.Type) unsafe.Pointer { + return unsafe_New(t) +} + +func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if s.skipWhiteSpace() == nul { + s.read() + } + if s.char() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + if err := d.dec.DecodeStream(s, depth, newptr); err != nil { + return err + } + return nil +} + +func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + if p != nil { + *(*unsafe.Pointer)(p) = nil + } + cursor += 4 + return cursor, nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + c, err := d.dec.Decode(ctx, cursor, depth, newptr) + if err != nil { + *(*unsafe.Pointer)(p) = nil + return 0, err + } + cursor = c + return cursor, nil +} + +func (d *ptrDecoder) DecodePath(ctx *RuntimeContext, 
cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: ptr decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/slice.go b/vendor/github.com/goccy/go-json/internal/decoder/slice.go new file mode 100644 index 0000000..30a23e4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/slice.go @@ -0,0 +1,380 @@ +package decoder + +import ( + "reflect" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +var ( + sliceType = runtime.Type2RType( + reflect.TypeOf((*sliceHeader)(nil)).Elem(), + ) + nilSlice = unsafe.Pointer(&sliceHeader{}) +) + +type sliceDecoder struct { + elemType *runtime.Type + isElemPointerType bool + valueDecoder Decoder + size uintptr + arrayPool sync.Pool + structName string + fieldName string +} + +// If use reflect.SliceHeader, data type is uintptr. +// In this case, Go compiler cannot trace reference created by newArray(). +// So, define using unsafe.Pointer as data type +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +const ( + defaultSliceCapacity = 2 +) + +func newSliceDecoder(dec Decoder, elemType *runtime.Type, size uintptr, structName, fieldName string) *sliceDecoder { + return &sliceDecoder{ + valueDecoder: dec, + elemType: elemType, + isElemPointerType: elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map, + size: size, + arrayPool: sync.Pool{ + New: func() interface{} { + return &sliceHeader{ + data: newArray(elemType, defaultSliceCapacity), + len: 0, + cap: defaultSliceCapacity, + } + }, + }, + structName: structName, + fieldName: fieldName, + } +} + +func (d *sliceDecoder) newSlice(src *sliceHeader) *sliceHeader { + slice := d.arrayPool.Get().(*sliceHeader) + if src.len > 0 { + // copy original elem + if slice.cap < src.cap { + data := newArray(d.elemType, src.cap) + slice = &sliceHeader{data: data, len: src.len, cap: src.cap} + } else { + slice.len = src.len + } + copySlice(d.elemType, *slice, *src) + } else { + slice.len = 0 + } + return slice +} + +func (d *sliceDecoder) releaseSlice(p *sliceHeader) { + d.arrayPool.Put(p) +} + +//go:linkname copySlice reflect.typedslicecopy +func copySlice(elemType *runtime.Type, dst, src sliceHeader) int + +//go:linkname newArray reflect.unsafe_NewArray +func newArray(*runtime.Type, int) unsafe.Pointer + +//go:linkname typedmemmove reflect.typedmemmove +func typedmemmove(t *runtime.Type, dst, src unsafe.Pointer) + +func (d *sliceDecoder) errNumber(offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: "number", + Type: reflect.SliceOf(runtime.RType2Type(d.elemType)), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +func (d *sliceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case 'n': + if err := nullBytes(s); err != nil { + return err + } + typedmemmove(sliceType, p, nilSlice) + return nil + case '[': + s.cursor++ + if s.skipWhiteSpace() == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + s.cursor++ + return nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := 
sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + + if err := d.valueDecoder.DecodeStream(s, depth, ep); err != nil { + return err + } + s.skipWhiteSpace() + RETRY: + switch s.char() { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + d.releaseSlice(slice) + s.cursor++ + return nil + case ',': + idx++ + case nul: + if s.read() { + goto RETRY + } + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + } + s.cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.errNumber(s.totalOffset()) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("slice", s.totalOffset()) +} + +func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + typedmemmove(sliceType, p, nilSlice) + return cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + cursor++ + return cursor, nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + c, err := d.valueDecoder.Decode(ctx, cursor, depth, ep) + if err != nil { + return 0, err + } + cursor = c + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + d.releaseSlice(slice) + cursor++ + return cursor, nil + case ',': + idx++ + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + return 
0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, d.errNumber(cursor) + default: + return 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} + +func (d *sliceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + ret := [][]byte{} + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + cursor++ + return ret, cursor, nil + } + idx := 0 + for { + child, found, err := ctx.Option.Path.node.Index(idx) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) + cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + cursor++ + return ret, cursor, nil + case ',': + idx++ + default: + return nil, 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errNumber(cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/stream.go b/vendor/github.com/goccy/go-json/internal/decoder/stream.go new file mode 100644 index 0000000..a383f72 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/stream.go @@ -0,0 +1,556 @@ +package decoder + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +const ( + initBufSize = 512 +) + +type Stream struct { + buf []byte + bufSize int64 + length int64 + r io.Reader + offset int64 + cursor int64 + filledBuffer bool + allRead bool + UseNumber bool + DisallowUnknownFields bool + Option *Option +} + +func NewStream(r io.Reader) *Stream { + return &Stream{ + r: r, + bufSize: initBufSize, + buf: make([]byte, initBufSize), + Option: &Option{}, + } +} + +func (s *Stream) TotalOffset() int64 { + return s.totalOffset() +} + +func (s *Stream) Buffered() io.Reader { + buflen := int64(len(s.buf)) + for i := s.cursor; i < buflen; i++ { + if s.buf[i] == nul { + return bytes.NewReader(s.buf[s.cursor:i]) + } + } + return bytes.NewReader(s.buf[s.cursor:]) +} + +func (s *Stream) PrepareForDecode() error { + for { + switch s.char() { + case ' ', '\t', '\r', '\n': + s.cursor++ + continue + case ',', ':': + s.cursor++ + return nil + case nul: + if s.read() { + continue + } + return io.EOF + } + break + } + return nil +} + +func (s *Stream) totalOffset() int64 { + return s.offset + s.cursor +} + +func (s *Stream) char() byte { + return s.buf[s.cursor] +} + +func (s *Stream) equalChar(c byte) bool { + cur := 
s.buf[s.cursor] + if cur == nul { + s.read() + cur = s.buf[s.cursor] + } + return cur == c +} + +func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) { + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) bufptr() unsafe.Pointer { + return (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) { + s.cursor-- // for retry ( because caller progress cursor position in each loop ) + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) Reset() { + s.reset() + s.bufSize = int64(len(s.buf)) +} + +func (s *Stream) More() bool { + for { + switch s.char() { + case ' ', '\n', '\r', '\t': + s.cursor++ + continue + case '}', ']': + return false + case nul: + if s.read() { + continue + } + return false + } + break + } + return true +} + +func (s *Stream) Token() (interface{}, error) { + for { + c := s.char() + switch c { + case ' ', '\n', '\r', '\t': + s.cursor++ + case '{', '[', ']', '}': + s.cursor++ + return json.Delim(c), nil + case ',', ':': + s.cursor++ + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + bytes := floatBytes(s) + str := *(*string)(unsafe.Pointer(&bytes)) + if s.UseNumber { + return json.Number(str), nil + } + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, err + } + return f64, nil + case '"': + bytes, err := stringBytes(s) + if err != nil { + return nil, err + } + return string(bytes), nil + case 't': + if err := trueBytes(s); err != nil { + return nil, err + } + return true, nil + case 'f': + if err := falseBytes(s); err != nil { + return nil, err + } + return false, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto END + default: + return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset()) + } + } +END: + return nil, io.EOF +} + +func (s *Stream) reset() { + s.offset += s.cursor + s.buf = s.buf[s.cursor:] + s.length -= s.cursor + s.cursor = 0 +} + +func (s *Stream) readBuf() []byte { + if s.filledBuffer { + s.bufSize *= 2 + remainBuf := s.buf + s.buf = make([]byte, s.bufSize) + copy(s.buf, remainBuf) + } + remainLen := s.length - s.cursor + remainNotNulCharNum := int64(0) + for i := int64(0); i < remainLen; i++ { + if s.buf[s.cursor+i] == nul { + break + } + remainNotNulCharNum++ + } + s.length = s.cursor + remainNotNulCharNum + return s.buf[s.cursor+remainNotNulCharNum:] +} + +func (s *Stream) read() bool { + if s.allRead { + return false + } + buf := s.readBuf() + last := len(buf) - 1 + buf[last] = nul + n, err := s.r.Read(buf[:last]) + s.length += int64(n) + if n == last { + s.filledBuffer = true + } else { + s.filledBuffer = false + } + if err == io.EOF { + s.allRead = true + } else if err != nil { + return false + } + return true +} + +func (s *Stream) skipWhiteSpace() byte { + p := s.bufptr() +LOOP: + c := char(p, s.cursor) + switch c { + case ' ', '\n', '\t', '\r': + s.cursor++ + goto LOOP + case nul: + if s.read() { + p = s.bufptr() + goto LOOP + } + } + return c +} + +func (s *Stream) skipObject(depth int64) error { + braceCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + braceCount-- + depth-- + if braceCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { + 
return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipArray(depth int64) error { + bracketCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipValue(depth int64) error { + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset()) + case '{': + s.cursor = cursor + 1 + return s.skipObject(depth + 1) + case '[': + s.cursor = cursor + 1 + return s.skipArray(depth + 1) + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + case '"': + s.cursor = cursor + 1 + return nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + c := char(p, cursor) + if floatTable[c] { + continue + } else if c == nul { + if s.read() { + _, cursor, p = s.stat() + continue + } + } + s.cursor = cursor + return nil + } + case 't': + s.cursor = cursor + if err := trueBytes(s); err != nil { + return err + } + return nil + case 'f': + s.cursor = cursor + if err := falseBytes(s); err != nil { + return err + } + return nil + case 'n': + s.cursor = cursor + if err := nullBytes(s); err != nil { + return err + } + return nil + } + cursor++ + } +} + +func nullBytes(s *Stream) error { + // current cursor's character is 'n' + s.cursor++ + if s.char() != 
'u' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadNull(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "null", s.totalOffset()) +} + +func trueBytes(s *Stream) error { + // current cursor's character is 't' + s.cursor++ + if s.char() != 'r' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'u' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadTrue(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset()) +} + +func falseBytes(s *Stream) error { + // current cursor's character is 'f' + s.cursor++ + if s.char() != 'a' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 's' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadFalse(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset()) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go new file mode 100644 index 0000000..32602c9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go @@ -0,0 +1,452 @@ +package decoder + +import ( + "bytes" + "fmt" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type stringDecoder struct { + structName string + fieldName string +} + +func newStringDecoder(structName, fieldName string) *stringDecoder { + return &stringDecoder{ + structName: structName, + fieldName: fieldName, + } +} + +func (d *stringDecoder) errUnmarshalType(typeName string, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typeName, + Type: reflect.TypeOf(""), + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *stringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + s.reset() + return nil +} + +func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + return cursor, nil +} + +func (d *stringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, 
nil + } + return [][]byte{bytes}, c, nil +} + +var ( + hexToInt = [256]int{ + '0': 0, + '1': 1, + '2': 2, + '3': 3, + '4': 4, + '5': 5, + '6': 6, + '7': 7, + '8': 8, + '9': 9, + 'A': 10, + 'B': 11, + 'C': 12, + 'D': 13, + 'E': 14, + 'F': 15, + 'a': 10, + 'b': 11, + 'c': 12, + 'd': 13, + 'e': 14, + 'f': 15, + } +) + +func unicodeToRune(code []byte) rune { + var r rune + for i := 0; i < len(code); i++ { + r = r*16 + rune(hexToInt[code[i]]) + } + return r +} + +func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool { + for s.cursor+n >= s.length { + if !s.read() { + return false + } + *p = s.bufptr() + } + return true +} + +func decodeUnicodeRune(s *Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) { + const defaultOffset = 5 + const surrogateOffset = 11 + + if !readAtLeast(s, defaultOffset, &p) { + return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + + r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + if !readAtLeast(s, surrogateOffset, &p) { + return unicode.ReplacementChar, defaultOffset, p, nil + } + if s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { + return unicode.ReplacementChar, defaultOffset, p, nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return r, surrogateOffset, p, nil + } + } + return r, defaultOffset, p, nil +} + +func decodeUnicode(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + const backSlashAndULen = 2 // length of \u + + r, offset, pp, err := decodeUnicodeRune(s, p) + if err != nil { + return nil, err + } + unicode := []byte(string(r)) + unicodeLen := int64(len(unicode)) + s.buf = append(append(s.buf[:s.cursor-1], unicode...), s.buf[s.cursor+offset:]...) + unicodeOrgLen := offset - 1 + s.length = s.length - (backSlashAndULen + (unicodeOrgLen - unicodeLen)) + s.cursor = s.cursor - backSlashAndULen + unicodeLen + return pp, nil +} + +func decodeEscapeString(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + s.cursor++ +RETRY: + switch s.buf[s.cursor] { + case '"': + s.buf[s.cursor] = '"' + case '\\': + s.buf[s.cursor] = '\\' + case '/': + s.buf[s.cursor] = '/' + case 'b': + s.buf[s.cursor] = '\b' + case 'f': + s.buf[s.cursor] = '\f' + case 'n': + s.buf[s.cursor] = '\n' + case 'r': + s.buf[s.cursor] = '\r' + case 't': + s.buf[s.cursor] = '\t' + case 'u': + return decodeUnicode(s, p) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + p = s.bufptr() + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.buf = append(s.buf[:s.cursor-1], s.buf[s.cursor:]...) + s.length-- + s.cursor-- + p = s.bufptr() + return p, nil +} + +var ( + runeErrBytes = []byte(string(utf8.RuneError)) + runeErrBytesLen = int64(len(runeErrBytes)) +) + +func stringBytes(s *Stream) ([]byte, error) { + _, cursor, p := s.stat() + cursor++ // skip double quote char + start := cursor + for { + switch char(p, cursor) { + case '\\': + s.cursor = cursor + pp, err := decodeEscapeString(s, p) + if err != nil { + return nil, err + } + p = pp + cursor = s.cursor + case '"': + literal := s.buf[start:cursor] + cursor++ + s.cursor = cursor + return literal, nil + case + // 0x00 is nul, 0x5c is '\\', 0x22 is '"' . 
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, // 0x10-0x1F + 0x20, 0x21 /*0x22,*/, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, // 0x20-0x2F + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, // 0x30-0x3F + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, // 0x40-0x4F + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B /*0x5C,*/, 0x5D, 0x5E, 0x5F, // 0x50-0x5F + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, // 0x60-0x6F + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F: // 0x70-0x7F + // character is ASCII. skip to next char + case + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, // 0x80-0x8F + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, // 0x90-0x9F + 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, // 0xA0-0xAF + 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, // 0xB0-0xBF + 0xC0, 0xC1, // 0xC0-0xC1 + 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF: // 0xF5-0xFE + // character is invalid + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) + _, _, p = s.stat() + cursor += runeErrBytesLen + s.length += runeErrBytesLen + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + case 0xEF: + // RuneError is {0xEF, 0xBF, 0xBD} + if s.buf[cursor+1] == 0xBF && s.buf[cursor+2] == 0xBD { + // found RuneError: skip + cursor += 2 + break + } + fallthrough + default: + // multi bytes character + if !utf8.FullRune(s.buf[cursor : len(s.buf)-1]) { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + } + r, size := utf8.DecodeRune(s.buf[cursor:]) + if r == utf8.RuneError { + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) 
+ cursor += runeErrBytesLen + s.length += runeErrBytesLen + _, _, p = s.stat() + } else { + cursor += int64(size) + } + continue + } + cursor++ + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) +} + +func (d *stringDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '[': + return nil, d.errUnmarshalType("array", s.totalOffset()) + case '{': + return nil, d.errUnmarshalType("object", s.totalOffset()) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, d.errUnmarshalType("number", s.totalOffset()) + case '"': + return stringBytes(s) + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + } + break + } + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) +} + +func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + case '[': + return nil, 0, d.errUnmarshalType("array", cursor) + case '{': + return nil, 0, d.errUnmarshalType("object", cursor) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errUnmarshalType("number", cursor) + case '"': + cursor++ + start := cursor + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + escaped := 0 + for { + switch char(b, cursor) { + case '\\': + escaped++ + cursor++ + switch char(b, cursor) { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + cursor++ + case 'u': + buflen := int64(len(buf)) + if cursor+5 >= buflen { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + for i := int64(1); i <= 4; i++ { + c := char(b, cursor+i) + if !(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')) { + return nil, 0, errors.ErrSyntax(fmt.Sprintf("json: invalid character %c in \\u hexadecimal character escape", c), cursor+i) + } + } + cursor += 5 + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + continue + case '"': + literal := buf[start:cursor] + if escaped > 0 { + literal = literal[:unescapeString(literal)] + } + cursor++ + return literal, cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + cursor++ + } + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) + } + } +} + +var unescapeMap = [256]byte{ + '"': '"', + '\\': '\\', + '/': '/', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', +} + +func unsafeAdd(ptr unsafe.Pointer, offset int) unsafe.Pointer { + return unsafe.Pointer(uintptr(ptr) + uintptr(offset)) +} + +func unescapeString(buf []byte) int { + p := (*sliceHeader)(unsafe.Pointer(&buf)).data + end := unsafeAdd(p, len(buf)) + src := unsafeAdd(p, bytes.IndexByte(buf, '\\')) + dst := src + for src != end { + c := char(src, 0) + if c == '\\' { + escapeChar := char(src, 1) + if escapeChar != 'u' { + *(*byte)(dst) = unescapeMap[escapeChar] + src = unsafeAdd(src, 2) + dst = unsafeAdd(dst, 1) + } else { + v1 := hexToInt[char(src, 2)] + v2 := hexToInt[char(src, 3)] + v3 := hexToInt[char(src, 4)] + v4 := hexToInt[char(src, 5)] + code := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if code >= 0xd800 && code < 0xdc00 && uintptr(unsafeAdd(src, 11)) < uintptr(end) { + if char(src, 6) == '\\' && 
char(src, 7) == 'u' { + v1 := hexToInt[char(src, 8)] + v2 := hexToInt[char(src, 9)] + v3 := hexToInt[char(src, 10)] + v4 := hexToInt[char(src, 11)] + lo := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if lo >= 0xdc00 && lo < 0xe000 { + code = (code-0xd800)<<10 | (lo - 0xdc00) + 0x10000 + src = unsafeAdd(src, 6) + } + } + } + var b [utf8.UTFMax]byte + n := utf8.EncodeRune(b[:], code) + switch n { + case 4: + *(*byte)(unsafeAdd(dst, 3)) = b[3] + fallthrough + case 3: + *(*byte)(unsafeAdd(dst, 2)) = b[2] + fallthrough + case 2: + *(*byte)(unsafeAdd(dst, 1)) = b[1] + fallthrough + case 1: + *(*byte)(unsafeAdd(dst, 0)) = b[0] + } + src = unsafeAdd(src, 6) + dst = unsafeAdd(dst, n) + } + } else { + *(*byte)(dst) = c + src = unsafeAdd(src, 1) + dst = unsafeAdd(dst, 1) + } + } + return int(uintptr(dst) - uintptr(p)) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go new file mode 100644 index 0000000..313da15 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go @@ -0,0 +1,845 @@ +package decoder + +import ( + "fmt" + "math" + "math/bits" + "sort" + "strings" + "unicode" + "unicode/utf16" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type structFieldSet struct { + dec Decoder + offset uintptr + isTaggedKey bool + fieldIdx int + key string + keyLen int64 + err error +} + +type structDecoder struct { + fieldMap map[string]*structFieldSet + fieldUniqueNameNum int + stringDecoder *stringDecoder + structName string + fieldName string + isTriedOptimize bool + keyBitmapUint8 [][256]uint8 + keyBitmapUint16 [][256]uint16 + sortedFieldSets []*structFieldSet + keyDecoder func(*structDecoder, []byte, int64) (int64, *structFieldSet, error) + keyStreamDecoder func(*structDecoder, *Stream) (*structFieldSet, string, error) +} + +var ( + largeToSmallTable [256]byte +) + +func init() { + for i := 0; i < 256; i++ { + c := i + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + largeToSmallTable[i] = byte(c) + } +} + +func toASCIILower(s string) string { + b := []byte(s) + for i := range b { + b[i] = largeToSmallTable[b[i]] + } + return string(b) +} + +func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder { + return &structDecoder{ + fieldMap: fieldMap, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + keyDecoder: decodeKey, + keyStreamDecoder: decodeKeyStream, + } +} + +const ( + allowOptimizeMaxKeyLen = 64 + allowOptimizeMaxFieldLen = 16 +) + +func (d *structDecoder) tryOptimize() { + fieldUniqueNameMap := map[string]int{} + fieldIdx := -1 + for k, v := range d.fieldMap { + lower := strings.ToLower(k) + idx, exists := fieldUniqueNameMap[lower] + if exists { + v.fieldIdx = idx + } else { + fieldIdx++ + v.fieldIdx = fieldIdx + } + fieldUniqueNameMap[lower] = fieldIdx + } + d.fieldUniqueNameNum = len(fieldUniqueNameMap) + + if d.isTriedOptimize { + return + } + fieldMap := map[string]*structFieldSet{} + conflicted := map[string]struct{}{} + for k, v := range d.fieldMap { + key := strings.ToLower(k) + if key != k { + if key != toASCIILower(k) { + d.isTriedOptimize = true + return + } + // already exists same key (e.g. 
Hello and HELLO has same lower case key + if _, exists := conflicted[key]; exists { + d.isTriedOptimize = true + return + } + conflicted[key] = struct{}{} + } + if field, exists := fieldMap[key]; exists { + if field != v { + d.isTriedOptimize = true + return + } + } + fieldMap[key] = v + } + + if len(fieldMap) > allowOptimizeMaxFieldLen { + d.isTriedOptimize = true + return + } + + var maxKeyLen int + sortedKeys := []string{} + for key := range fieldMap { + keyLen := len(key) + if keyLen > allowOptimizeMaxKeyLen { + d.isTriedOptimize = true + return + } + if maxKeyLen < keyLen { + maxKeyLen = keyLen + } + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + + // By allocating one extra capacity than `maxKeyLen`, + // it is possible to avoid the process of comparing the index of the key with the length of the bitmap each time. + bitmapLen := maxKeyLen + 1 + if len(sortedKeys) <= 8 { + keyBitmap := make([][256]uint8, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint8 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint8 + d.keyStreamDecoder = decodeKeyByBitmapUint8Stream + } else { + keyBitmap := make([][256]uint16, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint16 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint16 + d.keyStreamDecoder = decodeKeyByBitmapUint16Stream + } +} + +// decode from '\uXXXX' +func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if cursor+defaultOffset >= int64(len(buf)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + + r := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + cursor += defaultOffset + if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' { + return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1, nil + } + cursor += 2 + r2 := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return []byte(string(r)), cursor + defaultOffset - 1, nil + } + } + return []byte(string(r)), cursor + defaultOffset - 1, nil +} + +func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64, error) { + c := buf[cursor] + cursor++ + switch c { + case '"': + return []byte{'"'}, cursor, nil + case '\\': + return []byte{'\\'}, cursor, nil + case '/': + return []byte{'/'}, cursor, nil + case 'b': + return []byte{'\b'}, cursor, nil + case 'f': + return []byte{'\f'}, cursor, nil + case 'n': + return []byte{'\n'}, cursor, nil + case 'r': + return []byte{'\r'}, cursor, nil + case 't': + return []byte{'\t'}, cursor, nil + case 'u': + return decodeKeyCharByUnicodeRune(buf, cursor) + } + return nil, cursor, nil +} + +func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, 
errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyNotFound(b unsafe.Pointer, cursor int64) (int64, *structFieldSet, error) { + for { + cursor++ + switch char(b, cursor) { + case '"': + cursor++ + return cursor, nil, nil + case '\\': + cursor++ + if char(b, cursor) == nul { + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + } +} + +func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + key, c, err := d.stringDecoder.decodeByte(buf, cursor) + if err != nil { + return 0, nil, err + } + cursor = c + k := *(*string)(unsafe.Pointer(&key)) + field, exists := d.fieldMap[k] + if !exists { + return cursor, nil, nil + } + return cursor, field, nil +} + +func decodeKeyByBitmapUint8Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() 
{ + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +func decodeKeyByBitmapUint16Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +// decode from '\uXXXX' +func 
decodeKeyCharByUnicodeRuneStream(s *Stream) ([]byte, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if s.cursor+defaultOffset >= s.length { + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped unicode char", s.totalOffset()) + } + } + + r := unicodeToRune(s.buf[s.cursor : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + s.cursor += defaultOffset + if s.cursor+surrogateOffset >= s.length { + s.read() + } + if s.cursor+surrogateOffset >= s.length || s.buf[s.cursor] != '\\' || s.buf[s.cursor+1] != 'u' { + s.cursor += defaultOffset - 1 + return []byte(string(unicode.ReplacementChar)), nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil + } + } + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil +} + +func decodeKeyCharByEscapeCharStream(s *Stream) ([]byte, error) { + c := s.buf[s.cursor] + s.cursor++ +RETRY: + switch c { + case '"': + return []byte{'"'}, nil + case '\\': + return []byte{'\\'}, nil + case '/': + return []byte{'/'}, nil + case 'b': + return []byte{'\b'}, nil + case 'f': + return []byte{'\f'}, nil + case 'n': + return []byte{'\n'}, nil + case 'r': + return []byte{'\r'}, nil + case 't': + return []byte{'\t'}, nil + case 'u': + return decodeKeyCharByUnicodeRuneStream(s) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped char", s.totalOffset()) + } + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("struct field", s.totalOffset()) + } +} + +func decodeKeyNotFoundStream(s *Stream, start int64) (*structFieldSet, string, error) { + buf, cursor, p := s.stat() + for { + cursor++ + switch char(p, cursor) { + case '"': + b := buf[start:cursor] + key := *(*string)(unsafe.Pointer(&b)) + cursor++ + s.cursor = cursor + return nil, key, nil + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + case nul: + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + } +} + +func decodeKeyStream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + key, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return nil, "", err + } + k := *(*string)(unsafe.Pointer(&key)) + return d.fieldMap[k], k, nil +} + +func (d *structDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + c := s.skipWhiteSpace() + switch c { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + default: + if s.char() != '{' { + return errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + s.cursor++ + return nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (s.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + s.reset() + field, key, err := d.keyStreamDecoder(d, s) + if err != nil { + return err + } + if s.skipWhiteSpace() != ':' { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + if field != nil { + if field.err != nil 
{ + return field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + if err := s.skipValue(depth); err != nil { + return err + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return s.skipObject(depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + } + } else if s.DisallowUnknownFields { + return fmt.Errorf("json: unknown field %q", key) + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + c := s.skipWhiteSpace() + if c == '}' { + s.cursor++ + return nil + } + if c != ',' { + return errors.ErrExpected("comma after object element", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *structDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + buflen := int64(len(buf)) + cursor = skipWhiteSpace(buf, cursor) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + switch char(b, cursor) { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '{': + default: + return 0, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return cursor, nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (ctx.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + c, field, err := d.keyDecoder(d, buf, cursor) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, c) + if char(b, cursor) != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + if cursor >= buflen { + return 0, errors.ErrExpected("object value after colon", cursor) + } + if field != nil { + if field.err != nil { + return 0, field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return skipObject(buf, cursor, depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if char(b, cursor) == '}' { + cursor++ + return cursor, nil + } + if char(b, cursor) != ',' { + return 0, errors.ErrExpected("comma after object element", cursor) + } + cursor++ + } +} + +func (d *structDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: struct decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/type.go b/vendor/github.com/goccy/go-json/internal/decoder/type.go new file mode 100644 index 0000000..beaf3ab --- 
/dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/type.go @@ -0,0 +1,30 @@ +package decoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "unsafe" +) + +type Decoder interface { + Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error) + DecodePath(*RuntimeContext, int64, int64) ([][]byte, int64, error) + DecodeStream(*Stream, int64, unsafe.Pointer) error +} + +const ( + nul = '\000' + maxDecodeNestingDepth = 10000 +) + +type unmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +var ( + unmarshalJSONType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + unmarshalJSONContextType = reflect.TypeOf((*unmarshalerContext)(nil)).Elem() + unmarshalTextType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/uint.go b/vendor/github.com/goccy/go-json/internal/decoder/uint.go new file mode 100644 index 0000000..4131731 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/uint.go @@ -0,0 +1,194 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type uintDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, uint64) + structName string + fieldName string +} + +func newUintDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, uint64)) *uintDecoder { + return &uintDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *uintDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Offset: offset, + } +} + +var ( + pow10u64 = [...]uint64{ + 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + pow10u64Len = len(pow10u64) +) + +func (d *uintDecoder) parseUint(b []byte) (uint64, error) { + maxDigit := len(b) + if maxDigit > pow10u64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := uint64(0) + for i := 0; i < maxDigit; i++ { + c := uint64(b[i]) - 48 + digitValue := pow10u64[maxDigit-i-1] + sum += c * digitValue + } + return sum, nil +} + +func (d *uintDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + break + } + return nil, errors.ErrUnexpectedEndOfJSON("number(unsigned integer)", s.totalOffset()) +} + +func (d *uintDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[buf[cursor]] { + cursor++ + } + num 
:= buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{buf[cursor]}, cursor) + } + } +} + +func (d *uintDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + u64, err := d.parseUint(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, u64) + return nil +} + +func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + u64, err := d.parseUint(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, u64) + return cursor, nil +} + +func (d *uintDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: uint decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go new file mode 100644 index 0000000..4cd6dbd --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go @@ -0,0 +1,104 @@ +package decoder + +import ( + "context" + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalJSONDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalJSONDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalJSONDecoder { + return &unmarshalJSONDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalJSONDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +func (d *unmarshalJSONDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + switch v := v.(type) { + case unmarshalerContext: + var ctx context.Context + if (s.Option.Flags & ContextOption) != 0 { + ctx = s.Option.Context + } else { + ctx = context.Background() + } + if err := v.UnmarshalJSON(ctx, dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + case json.Unmarshaler: + if err := v.UnmarshalJSON(dst); err != nil { + d.annotateError(s.cursor, err) + return err + } 
+ } + return nil +} + +func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if (ctx.Option.Flags & ContextOption) != 0 { + if err := v.(unmarshalerContext).UnmarshalJSON(ctx.Option.Context, dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } else { + if err := v.(json.Unmarshaler).UnmarshalJSON(dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } + return end, nil +} + +func (d *unmarshalJSONDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal json decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go new file mode 100644 index 0000000..d711d0f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go @@ -0,0 +1,285 @@ +package decoder + +import ( + "bytes" + "encoding" + "fmt" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalTextDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalTextDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalTextDecoder { + return &unmarshalTextDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalTextDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +var ( + nullbytes = []byte(`null`) +) + +func (d *unmarshalTextDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + } + } + dst := make([]byte, len(src)) + copy(dst, src) + + if b, ok := unquoteBytes(dst); ok { + dst = b + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + return nil +} + +func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if 
len(src) > 0 { + switch src[0] { + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + } + } + + if s, ok := unquoteBytes(src); ok { + src = s + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(src); err != nil { + d.annotateError(cursor, err) + return 0, err + } + return end, nil +} + +func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path") +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { //nolint: nonamedreturns + length := len(s) + if length < 2 || s[0] != '"' || s[length-1] != '"' { + return + } + s = s[1 : length-1] + length -= 2 + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < length { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == length { + return s, true + } + + b := make([]byte, length+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < length { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= length { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} + +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go new file mode 100644 index 0000000..0c4e2e6 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go @@ -0,0 +1,73 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type wrappedStringDecoder struct { + typ *runtime.Type + dec Decoder + stringDecoder *stringDecoder + structName string + fieldName string + isPtrType bool +} + +func newWrappedStringDecoder(typ *runtime.Type, dec Decoder, structName, fieldName string) *wrappedStringDecoder { + return &wrappedStringDecoder{ + typ: typ, + dec: dec, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + isPtrType: typ.Kind() == reflect.Ptr, + } +} + +func (d *wrappedStringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return nil + } + b := make([]byte, len(bytes)+1) + copy(b, bytes) + if _, err := d.dec.Decode(&RuntimeContext{Buf: b}, 0, depth, p); err != nil { + return err + } + return nil +} + +func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.stringDecoder.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return c, nil + } + bytes = append(bytes, nul) + oldBuf := ctx.Buf + ctx.Buf = bytes + if _, err := d.dec.Decode(ctx, 0, depth, p); err != nil { + return 0, err + } + ctx.Buf = oldBuf + return c, nil +} + +func (d *wrappedStringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: wrapped string decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go new file mode 100644 index 0000000..5b08fae --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go @@ -0,0 +1,1023 @@ +package encoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type Code interface { + Kind() CodeKind + ToOpcode(*compileContext) Opcodes + Filter(*FieldQuery) Code +} + +type AnonymousCode interface { + ToAnonymousOpcode(*compileContext) Opcodes +} + +type Opcodes []*Opcode + +func (o Opcodes) First() *Opcode { + if len(o) == 0 { + return nil + } + return o[0] +} + +func (o Opcodes) Last() *Opcode { + if len(o) == 0 { + return nil + } + return o[len(o)-1] +} + +func (o Opcodes) Add(codes ...*Opcode) Opcodes { + return append(o, codes...) 
+} + +type CodeKind int + +const ( + CodeKindInterface CodeKind = iota + CodeKindPtr + CodeKindInt + CodeKindUint + CodeKindFloat + CodeKindString + CodeKindBool + CodeKindStruct + CodeKindMap + CodeKindSlice + CodeKindArray + CodeKindBytes + CodeKindMarshalJSON + CodeKindMarshalText + CodeKindRecursive +) + +type IntCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *IntCode) Kind() CodeKind { + return CodeKindInt +} + +func (c *IntCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpIntPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpIntString) + default: + code = newOpCode(ctx, c.typ, OpInt) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *IntCode) Filter(_ *FieldQuery) Code { + return c +} + +type UintCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *UintCode) Kind() CodeKind { + return CodeKindUint +} + +func (c *UintCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpUintPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpUintString) + default: + code = newOpCode(ctx, c.typ, OpUint) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *UintCode) Filter(_ *FieldQuery) Code { + return c +} + +type FloatCode struct { + typ *runtime.Type + bitSize uint8 + isPtr bool +} + +func (c *FloatCode) Kind() CodeKind { + return CodeKindFloat +} + +func (c *FloatCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32Ptr) + default: + code = newOpCode(ctx, c.typ, OpFloat64Ptr) + } + default: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32) + default: + code = newOpCode(ctx, c.typ, OpFloat64) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *FloatCode) Filter(_ *FieldQuery) Code { + return c +} + +type StringCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *StringCode) Kind() CodeKind { + return CodeKindString +} + +func (c *StringCode) ToOpcode(ctx *compileContext) Opcodes { + isJSONNumberType := c.typ == runtime.Type2RType(jsonNumberType) + var code *Opcode + if c.isPtr { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumberPtr) + } else { + code = newOpCode(ctx, c.typ, OpStringPtr) + } + } else { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumber) + } else { + code = newOpCode(ctx, c.typ, OpString) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *StringCode) Filter(_ *FieldQuery) Code { + return c +} + +type BoolCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BoolCode) Kind() CodeKind { + return CodeKindBool +} + +func (c *BoolCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBoolPtr) + default: + code = newOpCode(ctx, c.typ, OpBool) + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *BoolCode) Filter(_ *FieldQuery) Code { + return c +} + +type BytesCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BytesCode) Kind() CodeKind { + return CodeKindBytes +} + +func (c *BytesCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBytesPtr) + default: + code = newOpCode(ctx, c.typ, OpBytes) + } + ctx.incIndex() + return 
Opcodes{code} +} + +func (c *BytesCode) Filter(_ *FieldQuery) Code { + return c +} + +type SliceCode struct { + typ *runtime.Type + value Code +} + +func (c *SliceCode) Kind() CodeKind { + return CodeKindSlice +} + +func (c *SliceCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + size := c.typ.Elem().Size() + header := newSliceHeaderCode(ctx, c.typ) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + elemCode := newSliceElemCode(ctx, c.typ.Elem(), header, size) + ctx.incIndex() + end := newOpCode(ctx, c.typ, OpSliceEnd) + ctx.incIndex() + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *SliceCode) Filter(_ *FieldQuery) Code { + return c +} + +type ArrayCode struct { + typ *runtime.Type + value Code +} + +func (c *ArrayCode) Kind() CodeKind { + return CodeKindArray +} + +func (c *ArrayCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + elem := c.typ.Elem() + alen := c.typ.Len() + size := elem.Size() + + header := newArrayHeaderCode(ctx, c.typ, alen) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + + elemCode := newArrayElemCode(ctx, elem, header, alen, size) + ctx.incIndex() + + end := newOpCode(ctx, c.typ, OpArrayEnd) + ctx.incIndex() + + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *ArrayCode) Filter(_ *FieldQuery) Code { + return c +} + +type MapCode struct { + typ *runtime.Type + key Code + value Code +} + +func (c *MapCode) Kind() CodeKind { + return CodeKindMap +} + +func (c *MapCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => value => code => key => code => value => code => end + // ^ | + // |_______________________| + header := newMapHeaderCode(ctx, c.typ) + ctx.incIndex() + + keyCodes := c.key.ToOpcode(ctx) + + value := newMapValueCode(ctx, c.typ.Elem(), header) + ctx.incIndex() + + ctx.incIndent() + valueCodes := c.value.ToOpcode(ctx) + ctx.decIndent() + + valueCodes.First().Flags |= IndirectFlags + + key := newMapKeyCode(ctx, c.typ.Key(), header) + ctx.incIndex() + + end := newMapEndCode(ctx, c.typ, header) + ctx.incIndex() + + header.Next = keyCodes.First() + keyCodes.Last().Next = value + value.Next = valueCodes.First() + valueCodes.Last().Next = key + key.Next = keyCodes.First() + + header.End = end + key.End = end + value.End = end + return Opcodes{header}.Add(keyCodes...).Add(value).Add(valueCodes...).Add(key).Add(end) +} + +func (c *MapCode) Filter(_ *FieldQuery) Code { + return c +} + +type StructCode struct { + typ *runtime.Type + fields []*StructFieldCode + isPtr bool + disableIndirectConversion bool + isIndirect bool + isRecursive bool +} + +func (c *StructCode) Kind() CodeKind { + return CodeKindStruct +} + +func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) *Opcode { + if isEmbeddedStruct(field) { + return c.lastAnonymousFieldCode(firstField) + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) 
*Opcode { + // firstField is special StructHead operation for anonymous structure. + // So, StructHead's next operation is truly struct head operation. + for firstField.Op == OpStructHead || firstField.Op == OpStructField { + firstField = firstField.Next + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + ctx.incIndent() + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + endField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = endField + } else { + firstField.End = endField + } + codes = codes.Add(fieldCodes...) + break + } + prevField = c.lastFieldCode(field, firstField) + codes = codes.Add(fieldCodes...) + } + if len(codes) == 0 { + head := &Opcode{ + Op: OpStructHead, + Idx: opcodeOffset(ctx.ptrIndex), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + ctx.incOpcodeIndex() + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + head.NextField = end + head.Next = end + head.End = end + codes = codes.Add(head, end) + ctx.incIndex() + } + ctx.decIndent() + ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes + return codes +} + +func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToAnonymousOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + lastField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = lastField + } else { + firstField.End = lastField + } + } + prevField = firstField + codes = codes.Add(fieldCodes...) 
+ } + return codes +} + +func (c *StructCode) removeFieldsByTags(tags runtime.StructTags) { + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.removeFieldsByTags(tags) + if len(structCode.fields) > 0 { + fields = append(fields, field) + } + continue + } + } + if tags.ExistsKey(field.key) { + continue + } + fields = append(fields, field) + } + c.fields = fields +} + +func (c *StructCode) enableIndirect() { + if c.isIndirect { + return + } + c.isIndirect = true + if len(c.fields) == 0 { + return + } + structCode := c.fields[0].getStruct() + if structCode == nil { + return + } + structCode.enableIndirect() +} + +func (c *StructCode) Filter(query *FieldQuery) Code { + fieldMap := map[string]*FieldQuery{} + for _, field := range query.Fields { + fieldMap[field.Name] = field + } + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + query, exists := fieldMap[field.key] + if !exists { + continue + } + fieldCode := &StructFieldCode{ + typ: field.typ, + key: field.key, + tag: field.tag, + value: field.value, + offset: field.offset, + isAnonymous: field.isAnonymous, + isTaggedKey: field.isTaggedKey, + isNilableType: field.isNilableType, + isNilCheck: field.isNilCheck, + isAddrForMarshaler: field.isAddrForMarshaler, + isNextOpPtrType: field.isNextOpPtrType, + } + if len(query.Fields) > 0 { + fieldCode.value = fieldCode.value.Filter(query) + } + fields = append(fields, fieldCode) + } + return &StructCode{ + typ: c.typ, + fields: fields, + isPtr: c.isPtr, + disableIndirectConversion: c.disableIndirectConversion, + isIndirect: c.isIndirect, + isRecursive: c.isRecursive, + } +} + +type StructFieldCode struct { + typ *runtime.Type + key string + tag *runtime.StructTag + value Code + offset uintptr + isAnonymous bool + isTaggedKey bool + isNilableType bool + isNilCheck bool + isAddrForMarshaler bool + isNextOpPtrType bool + isMarshalerContext bool +} + +func (c *StructFieldCode) getStruct() *StructCode { + value := c.value + ptr, ok := value.(*PtrCode) + if ok { + value = ptr.value + } + structCode, ok := value.(*StructCode) + if ok { + return structCode + } + return nil +} + +func (c *StructFieldCode) getAnonymousStruct() *StructCode { + if !c.isAnonymous { + return nil + } + return c.getStruct() +} + +func optimizeStructHeader(code *Opcode, tag *runtime.StructTag) OpType { + headType := code.ToHeaderType(tag.IsString) + if tag.IsOmitEmpty { + headType = headType.HeadToOmitEmptyHead() + } + return headType +} + +func optimizeStructField(code *Opcode, tag *runtime.StructTag) OpType { + fieldType := code.ToFieldType(tag.IsString) + if tag.IsOmitEmpty { + fieldType = fieldType.FieldToOmitEmptyField() + } + return fieldType +} + +func (c *StructFieldCode) headerOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructHeader(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + fieldCodes := Opcodes{field} + if op.IsMultipleOpHead() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) 
+ } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) fieldOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructField(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + + fieldCodes := Opcodes{field} + if op.IsMultipleOpField() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) + } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) Opcodes { + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + codes.Last().Next = end + code := codes.First() + for code.Op == OpStructField || code.Op == OpStructHead { + code = code.Next + } + for code.NextField != nil { + code = code.NextField + } + code.NextField = end + + codes = codes.Add(end) + ctx.incOpcodeIndex() + return codes +} + +func (c *StructFieldCode) structKey(ctx *compileContext) string { + if ctx.escapeKey { + rctx := &RuntimeContext{Option: &Option{Flag: HTMLEscapeOption}} + return fmt.Sprintf(`%s:`, string(AppendString(rctx, []byte{}, c.key))) + } + return fmt.Sprintf(`"%s":`, c.key) +} + +func (c *StructFieldCode) flags() OpFlags { + var flags OpFlags + if c.isTaggedKey { + flags |= IsTaggedKeyFlags + } + if c.isNilableType { + flags |= IsNilableTypeFlags + } + if c.isNilCheck { + flags |= NilCheckFlags + } + if c.isAddrForMarshaler { + flags |= AddrForMarshalerFlags + } + if c.isNextOpPtrType { + flags |= IsNextOpPtrTypeFlags + } + if c.isAnonymous { + flags |= AnonymousKeyFlags + } + if c.isMarshalerContext { + flags |= MarshalerContextFlags + } + return flags +} + +func (c *StructFieldCode) toValueOpcodes(ctx *compileContext) Opcodes { + if c.isAnonymous { + anonymCode, ok := c.value.(AnonymousCode) + if ok { + return anonymCode.ToAnonymousOpcode(ctx) + } + } + return c.value.ToOpcode(ctx) +} + +func (c *StructFieldCode) ToOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags(), + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + codes := c.headerOpcodes(ctx, field, valueCodes) + if isEndField { + codes = c.addStructEndCode(ctx, codes) + } + return codes + } + codes := c.fieldOpcodes(ctx, field, valueCodes) + if isEndField { + if isEnableStructEndOptimization(c.value) { + field.Op = field.Op.FieldToEnd() + } else { + codes = c.addStructEndCode(ctx, codes) + } + } + return codes +} + +func (c *StructFieldCode) ToAnonymousOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags() | AnonymousHeadFlags, + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + return c.headerOpcodes(ctx, field, valueCodes) + } + return c.fieldOpcodes(ctx, field, valueCodes) +} + +func isEnableStructEndOptimization(value Code) bool { + switch value.Kind() { + case CodeKindInt, + CodeKindUint, + CodeKindFloat, + 
CodeKindString, + CodeKindBool, + CodeKindBytes: + return true + case CodeKindPtr: + return isEnableStructEndOptimization(value.(*PtrCode).value) + default: + return false + } +} + +type InterfaceCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isPtr bool +} + +func (c *InterfaceCode) Kind() CodeKind { + return CodeKindInterface +} + +func (c *InterfaceCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpInterfacePtr) + default: + code = newOpCode(ctx, c.typ, OpInterface) + } + code.FieldQuery = c.fieldQuery + if c.typ.NumMethod() > 0 { + code.Flags |= NonEmptyInterfaceFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *InterfaceCode) Filter(query *FieldQuery) Code { + return &InterfaceCode{ + typ: c.typ, + fieldQuery: query, + isPtr: c.isPtr, + } +} + +type MarshalJSONCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool + isMarshalerContext bool +} + +func (c *MarshalJSONCode) Kind() CodeKind { + return CodeKindMarshalJSON +} + +func (c *MarshalJSONCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalJSON) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isMarshalerContext { + code.Flags |= MarshalerContextFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalJSONCode) Filter(query *FieldQuery) Code { + return &MarshalJSONCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + isMarshalerContext: c.isMarshalerContext, + } +} + +type MarshalTextCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool +} + +func (c *MarshalTextCode) Kind() CodeKind { + return CodeKindMarshalText +} + +func (c *MarshalTextCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalText) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalTextCode) Filter(query *FieldQuery) Code { + return &MarshalTextCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + } +} + +type PtrCode struct { + typ *runtime.Type + value Code + ptrNum uint8 +} + +func (c *PtrCode) Kind() CodeKind { + return CodeKindPtr +} + +func (c *PtrCode) ToOpcode(ctx *compileContext) Opcodes { + codes := c.value.ToOpcode(ctx) + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + var codes Opcodes + anonymCode, ok := c.value.(AnonymousCode) + if ok { + codes = anonymCode.ToAnonymousOpcode(ctx) + } else { + codes = c.value.ToOpcode(ctx) + } + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) Filter(query *FieldQuery) Code { + return &PtrCode{ + typ: c.typ, + value: c.value.Filter(query), + ptrNum: c.ptrNum, + } +} + +func convertPtrOp(code *Opcode) OpType { + ptrHeadOp := code.Op.HeadToPtrHead() + if code.Op != ptrHeadOp { + if code.PtrNum > 0 { + // ptr field and ptr head + 
code.PtrNum-- + } + return ptrHeadOp + } + switch code.Op { + case OpInt: + return OpIntPtr + case OpUint: + return OpUintPtr + case OpFloat32: + return OpFloat32Ptr + case OpFloat64: + return OpFloat64Ptr + case OpString: + return OpStringPtr + case OpBool: + return OpBoolPtr + case OpBytes: + return OpBytesPtr + case OpNumber: + return OpNumberPtr + case OpArray: + return OpArrayPtr + case OpSlice: + return OpSlicePtr + case OpMap: + return OpMapPtr + case OpMarshalJSON: + return OpMarshalJSONPtr + case OpMarshalText: + return OpMarshalTextPtr + case OpInterface: + return OpInterfacePtr + case OpRecursive: + return OpRecursivePtr + } + return code.Op +} + +func isEmbeddedStruct(field *StructFieldCode) bool { + if !field.isAnonymous { + return false + } + t := field.typ + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Kind() == reflect.Struct +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compact.go b/vendor/github.com/goccy/go-json/internal/encoder/compact.go new file mode 100644 index 0000000..e287a6c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compact.go @@ -0,0 +1,286 @@ +package encoder + +import ( + "bytes" + "fmt" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +var ( + isWhiteSpace = [256]bool{ + ' ': true, + '\n': true, + '\t': true, + '\r': true, + } + isHTMLEscapeChar = [256]bool{ + '<': true, + '>': true, + '&': true, + } + nul = byte('\000') +) + +func Compact(buf *bytes.Buffer, src []byte, escape bool) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + buf.Grow(len(src)) + dst := buf.Bytes() + + ctx := TakeRuntimeContext() + ctxBuf := ctx.Buf[:0] + ctxBuf = append(append(ctxBuf, src...), nul) + ctx.Buf = ctxBuf + + if err := compactAndWrite(buf, dst, ctxBuf, escape); err != nil { + ReleaseRuntimeContext(ctx) + return err + } + ReleaseRuntimeContext(ctx) + return nil +} + +func compactAndWrite(buf *bytes.Buffer, dst []byte, src []byte, escape bool) error { + dst, err := compact(dst, src, escape) + if err != nil { + return err + } + if _, err := buf.Write(dst); err != nil { + return err + } + return nil +} + +func compact(dst, src []byte, escape bool) ([]byte, error) { + buf, cursor, err := compactValue(dst, src, 0, escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { +LOOP: + if isWhiteSpace[buf[cursor]] { + cursor++ + goto LOOP + } + return cursor +} + +func compactValue(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return compactObject(dst, src, cursor, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return compactArray(dst, src, cursor, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 'f': + 
return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func compactObject(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + var err error + for { + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + dst = append(dst, ':') + dst, cursor, err = compactValue(dst, src, cursor+1, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func compactArray(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + var err error + for { + dst, cursor, err = compactValue(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after array value", cursor) + } + cursor++ + } +} + +func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] != '"' { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "string", cursor) + } + start := cursor + for { + cursor++ + c := src[cursor] + if escape { + if isHTMLEscapeChar[c] { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = cursor + 1 + } else if c == 0xE2 && cursor+2 < int64(len(src)) && src[cursor+1] == 0x80 && src[cursor+2]&^1 == 0xA8 { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[cursor+2]&0xF]) + start = cursor + 3 + cursor += 2 + } + } + switch c { + case '\\': + cursor++ + if src[cursor] == nul { + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + case '"': + cursor++ + return append(dst, src[start:cursor]...), cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + } +} + +func compactNumber(dst, src []byte, cursor int64) ([]byte, int64, error) { + start := cursor + for { + cursor++ + if floatTable[src[cursor]] { + continue + } + break + } + num := src[start:cursor] + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&num)), 64); err != nil { + return nil, 0, err + } + dst = append(dst, num...) 
+ return dst, cursor, nil +} + +func compactTrue(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`true`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "true", cursor) + } + dst = append(dst, "true"...) + cursor += 4 + return dst, cursor, nil +} + +func compactFalse(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+4 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if !bytes.Equal(src[cursor:cursor+5], []byte(`false`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "false", cursor) + } + dst = append(dst, "false"...) + cursor += 5 + return dst, cursor, nil +} + +func compactNull(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`null`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "null", cursor) + } + dst = append(dst, "null"...) + cursor += 4 + return dst, cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go new file mode 100644 index 0000000..37b7aa3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -0,0 +1,935 @@ +package encoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "sync/atomic" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type marshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +var ( + marshalJSONType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + marshalJSONContextType = reflect.TypeOf((*marshalerContext)(nil)).Elem() + marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + jsonNumberType = reflect.TypeOf(json.Number("")) + cachedOpcodeSets []*OpcodeSet + cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet + typeAddr *runtime.TypeAddr +) + +func init() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) +} + +func loadOpcodeMap() map[uintptr]*OpcodeSet { + p := atomic.LoadPointer(&cachedOpcodeMap) + return *(*map[uintptr]*OpcodeSet)(unsafe.Pointer(&p)) +} + +func storeOpcodeSet(typ uintptr, set *OpcodeSet, m map[uintptr]*OpcodeSet) { + newOpcodeMap := make(map[uintptr]*OpcodeSet, len(m)+1) + newOpcodeMap[typ] = set + + for k, v := range m { + newOpcodeMap[k] = v + } + + atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap))) +} + +func compileToGetCodeSetSlowPath(typeptr uintptr) (*OpcodeSet, error) { + opcodeMap := loadOpcodeMap() + if codeSet, exists := opcodeMap[typeptr]; exists { + return codeSet, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + storeOpcodeSet(typeptr, codeSet, opcodeMap) + return codeSet, nil +} + +func getFilteredCodeSetIfNeeded(ctx *RuntimeContext, codeSet *OpcodeSet) (*OpcodeSet, error) { + if (ctx.Option.Flag & ContextOption) == 0 { + return codeSet, nil + } + query := FieldQueryFromContext(ctx.Option.Context) + if query == nil { + return codeSet, nil + } + ctx.Option.Flag |= FieldQueryOption + cacheCodeSet := codeSet.getQueryCache(query.Hash()) + if cacheCodeSet 
!= nil { + return cacheCodeSet, nil + } + queryCodeSet, err := newCompiler().codeToOpcodeSet(codeSet.Type, codeSet.Code.Filter(query)) + if err != nil { + return nil, err + } + codeSet.setQueryCache(query.Hash(), queryCodeSet) + return queryCodeSet, nil +} + +type Compiler struct { + structTypeToCode map[uintptr]*StructCode +} + +func newCompiler() *Compiler { + return &Compiler{ + structTypeToCode: map[uintptr]*StructCode{}, + } +} + +func (c *Compiler) compile(typeptr uintptr) (*OpcodeSet, error) { + // noescape trick for header.typ ( reflect.*rtype ) + typ := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + code, err := c.typeToCode(typ) + if err != nil { + return nil, err + } + return c.codeToOpcodeSet(typ, code) +} + +func (c *Compiler) codeToOpcodeSet(typ *runtime.Type, code Code) (*OpcodeSet, error) { + noescapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + }, typ, code) + if err := noescapeKeyCode.Validate(); err != nil { + return nil, err + } + escapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + escapeKey: true, + }, typ, code) + noescapeKeyCode = copyOpcode(noescapeKeyCode) + escapeKeyCode = copyOpcode(escapeKeyCode) + setTotalLengthToInterfaceOp(noescapeKeyCode) + setTotalLengthToInterfaceOp(escapeKeyCode) + interfaceNoescapeKeyCode := copyToInterfaceOpcode(noescapeKeyCode) + interfaceEscapeKeyCode := copyToInterfaceOpcode(escapeKeyCode) + codeLength := noescapeKeyCode.TotalLength() + return &OpcodeSet{ + Type: typ, + NoescapeKeyCode: noescapeKeyCode, + EscapeKeyCode: escapeKeyCode, + InterfaceNoescapeKeyCode: interfaceNoescapeKeyCode, + InterfaceEscapeKeyCode: interfaceEscapeKeyCode, + CodeLength: codeLength, + EndCode: ToEndCode(interfaceNoescapeKeyCode), + Code: code, + QueryCache: map[string]*OpcodeSet{}, + }, nil +} + +func (c *Compiler) typeToCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + + isPtr := false + orgType := typ + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + isPtr = true + } + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(orgType) + case c.implementsMarshalText(typ): + return c.marshalTextCode(orgType) + } + switch typ.Kind() { + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, isPtr) + } + } + return c.sliceCode(typ) + case reflect.Map: + if isPtr { + return c.ptrCode(runtime.PtrTo(typ)) + } + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Int: + return c.intCode(typ, isPtr) + case reflect.Int8: + return c.int8Code(typ, isPtr) + case reflect.Int16: + return c.int16Code(typ, isPtr) + case reflect.Int32: + return c.int32Code(typ, isPtr) + case reflect.Int64: + return c.int64Code(typ, isPtr) + case reflect.Uint, reflect.Uintptr: + return c.uintCode(typ, isPtr) + case reflect.Uint8: + return c.uint8Code(typ, isPtr) + case reflect.Uint16: + return c.uint16Code(typ, isPtr) + case reflect.Uint32: + return c.uint32Code(typ, isPtr) + case reflect.Uint64: + return c.uint64Code(typ, isPtr) + case reflect.Float32: + return c.float32Code(typ, isPtr) + case reflect.Float64: + return c.float64Code(typ, isPtr) + case reflect.String: + return c.stringCode(typ, 
isPtr) + case reflect.Bool: + return c.boolCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, isPtr) + default: + if isPtr && typ.Implements(marshalTextType) { + typ = orgType + } + return c.typeToCodeWithPtr(typ, isPtr) + } +} + +func (c *Compiler) typeToCodeWithPtr(typ *runtime.Type, isPtr bool) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, false) + } + } + return c.sliceCode(typ) + case reflect.Array: + return c.arrayCode(typ) + case reflect.Map: + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, false) + case reflect.Int: + return c.intCode(typ, false) + case reflect.Int8: + return c.int8Code(typ, false) + case reflect.Int16: + return c.int16Code(typ, false) + case reflect.Int32: + return c.int32Code(typ, false) + case reflect.Int64: + return c.int64Code(typ, false) + case reflect.Uint: + return c.uintCode(typ, false) + case reflect.Uint8: + return c.uint8Code(typ, false) + case reflect.Uint16: + return c.uint16Code(typ, false) + case reflect.Uint32: + return c.uint32Code(typ, false) + case reflect.Uint64: + return c.uint64Code(typ, false) + case reflect.Uintptr: + return c.uintCode(typ, false) + case reflect.Float32: + return c.float32Code(typ, false) + case reflect.Float64: + return c.float64Code(typ, false) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Bool: + return c.boolCode(typ, false) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +const intSize = 32 << (^uint(0) >> 63) + +//nolint:unparam +func (c *Compiler) intCode(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int8Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int16Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int32Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int64Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uintCode(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint8Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint16Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint32Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint64Code(typ *runtime.Type, isPtr bool) (*UintCode, 
error) { + return &UintCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float32Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float64Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) stringCode(typ *runtime.Type, isPtr bool) (*StringCode, error) { + return &StringCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) boolCode(typ *runtime.Type, isPtr bool) (*BoolCode, error) { + return &BoolCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) intStringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int8StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int16StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int32StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int64StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uintStringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint8StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint16StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint32StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint64StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) bytesCode(typ *runtime.Type, isPtr bool) (*BytesCode, error) { + return &BytesCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) interfaceCode(typ *runtime.Type, isPtr bool) (*InterfaceCode, error) { + return &InterfaceCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) marshalJSONCode(typ *runtime.Type) (*MarshalJSONCode, error) { + return &MarshalJSONCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalJSONType(typ), + isNilableType: c.isNilableType(typ), + isMarshalerContext: typ.Implements(marshalJSONContextType) || runtime.PtrTo(typ).Implements(marshalJSONContextType), + }, nil +} + +//nolint:unparam +func (c *Compiler) marshalTextCode(typ *runtime.Type) (*MarshalTextCode, error) { + return &MarshalTextCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalTextType(typ), + isNilableType: c.isNilableType(typ), + }, nil +} + +func (c *Compiler) ptrCode(typ *runtime.Type) (*PtrCode, error) { + code, err := c.typeToCodeWithPtr(typ.Elem(), true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + return &PtrCode{typ: typ, value: ptr.value, ptrNum: ptr.ptrNum + 1}, nil + } + 
return &PtrCode{typ: typ, value: code, ptrNum: 1}, nil +} + +func (c *Compiler) sliceCode(typ *runtime.Type) (*SliceCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &SliceCode{typ: typ, value: code}, nil +} + +func (c *Compiler) arrayCode(typ *runtime.Type) (*ArrayCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &ArrayCode{typ: typ, value: code}, nil +} + +func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) { + keyCode, err := c.mapKeyCode(typ.Key()) + if err != nil { + return nil, err + } + valueCode, err := c.mapValueCode(typ.Elem()) + if err != nil { + return nil, err + } + if valueCode.Kind() == CodeKindStruct { + structCode := valueCode.(*StructCode) + structCode.enableIndirect() + } + return &MapCode{typ: typ, key: keyCode, value: valueCode}, nil +} + +func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSONType(typ) || c.implementsMarshalJSONType(runtime.PtrTo(typ)): + return c.marshalJSONCode(typ) + case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType): + return c.marshalTextCode(typ) + case typ.Kind() == reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + // isPtr was originally used to indicate whether the type of top level is pointer. + // However, since the slice/array element is a specification that can get the pointer address, explicitly set isPtr to true. + // See here for related issues: https://github.com/goccy/go-json/issues/370 + code, err := c.typeToCodeWithPtr(typ, true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Int: + return c.intStringCode(typ) + case reflect.Int8: + return c.int8StringCode(typ) + case reflect.Int16: + return c.int16StringCode(typ) + case reflect.Int32: + return c.int32StringCode(typ) + case reflect.Int64: + return c.int64StringCode(typ) + case reflect.Uint: + return c.uintStringCode(typ) + case reflect.Uint8: + return c.uint8StringCode(typ) + case reflect.Uint16: + return c.uint16StringCode(typ) + case reflect.Uint32: + return c.uint32StringCode(typ) + case reflect.Uint64: + return c.uint64StringCode(typ) + case reflect.Uintptr: + return c.uintStringCode(typ) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +func (c *Compiler) mapValueCode(typ *runtime.Type) (Code, error) { + switch typ.Kind() { + case reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + code, err := c.typeToCodeWithPtr(typ, false) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if code, exists := c.structTypeToCode[typeptr]; 
exists { + derefCode := *code + derefCode.isRecursive = true + return &derefCode, nil + } + indirect := runtime.IfaceIndir(typ) + code := &StructCode{typ: typ, isPtr: isPtr, isIndirect: indirect} + c.structTypeToCode[typeptr] = code + + fieldNum := typ.NumField() + tags := c.typeToStructTags(typ) + fields := []*StructFieldCode{} + for i, tag := range tags { + isOnlyOneFirstField := i == 0 && fieldNum == 1 + field, err := c.structFieldCode(code, tag, isPtr, isOnlyOneFirstField) + if err != nil { + return nil, err + } + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil { + structCode.removeFieldsByTags(tags) + if c.isAssignableIndirect(field, isPtr) { + if indirect { + structCode.isIndirect = true + } else { + structCode.isIndirect = false + } + } + } + } else { + structCode := field.getStruct() + if structCode != nil { + if indirect { + // if parent is indirect type, set child indirect property to true + structCode.isIndirect = true + } else { + // if parent is not indirect type, set child indirect property to false. + // but if parent's indirect is false and isPtr is true, then indirect must be true. + // Do this only if indirectConversion is enabled at the end of compileStruct. + structCode.isIndirect = false + } + } + } + fields = append(fields, field) + } + fieldMap := c.getFieldMap(fields) + duplicatedFieldMap := c.getDuplicatedFieldMap(fieldMap) + code.fields = c.filteredDuplicatedFields(fields, duplicatedFieldMap) + if !code.disableIndirectConversion && !indirect && isPtr { + code.enableIndirect() + } + delete(c.structTypeToCode, typeptr) + return code, nil +} + +func toElemType(t *runtime.Type) *runtime.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) { + field := tag.Field + fieldType := runtime.Type2RType(field.Type) + isIndirectSpecialCase := isPtr && isOnlyOneFirstField + fieldCode := &StructFieldCode{ + typ: fieldType, + key: tag.Key, + tag: tag, + offset: field.Offset, + isAnonymous: field.Anonymous && !tag.IsTaggedKey && toElemType(fieldType).Kind() == reflect.Struct, + isTaggedKey: tag.IsTaggedKey, + isNilableType: c.isNilableType(fieldType), + isNilCheck: true, + } + switch { + case c.isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case c.isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case isPtr && c.isPtrMarshalJSONType(fieldType): + // *struct{ field T } + // func (*T) MarshalJSON() ([]byte, error) + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + case isPtr && c.isPtrMarshalTextType(fieldType): + // *struct{ field T } + // func (*T) MarshalText() ([]byte, error) + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + 
fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + default: + code, err := c.typeToCodeWithPtr(fieldType, isPtr) + if err != nil { + return nil, err + } + switch code.Kind() { + case CodeKindPtr, CodeKindInterface: + fieldCode.isNextOpPtrType = true + } + fieldCode.value = code + } + return fieldCode, nil +} + +func (c *Compiler) isAssignableIndirect(fieldCode *StructFieldCode, isPtr bool) bool { + if isPtr { + return false + } + codeType := fieldCode.value.Kind() + if codeType == CodeKindMarshalJSON { + return false + } + if codeType == CodeKindMarshalText { + return false + } + return true +} + +func (c *Compiler) getFieldMap(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + fieldMap[k] = append(fieldMap[k], v...) + } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getAnonymousFieldMap(field *StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + structCode := field.getAnonymousStruct() + if structCode == nil || structCode.isRecursive { + fieldMap[field.key] = append(fieldMap[field.key], field) + return fieldMap + } + for k, v := range c.getFieldMapFromAnonymousParent(structCode.fields) { + fieldMap[k] = append(fieldMap[k], v...) + } + return fieldMap +} + +func (c *Compiler) getFieldMapFromAnonymousParent(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + // Do not handle tagged key when embedding more than once + for _, vv := range v { + vv.isTaggedKey = false + } + fieldMap[k] = append(fieldMap[k], v...) 
+ } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getDuplicatedFieldMap(fieldMap map[string][]*StructFieldCode) map[*StructFieldCode]struct{} { + duplicatedFieldMap := map[*StructFieldCode]struct{}{} + for _, fields := range fieldMap { + if len(fields) == 1 { + continue + } + if c.isTaggedKeyOnly(fields) { + for _, field := range fields { + if field.isTaggedKey { + continue + } + duplicatedFieldMap[field] = struct{}{} + } + } else { + for _, field := range fields { + duplicatedFieldMap[field] = struct{}{} + } + } + } + return duplicatedFieldMap +} + +func (c *Compiler) filteredDuplicatedFields(fields []*StructFieldCode, duplicatedFieldMap map[*StructFieldCode]struct{}) []*StructFieldCode { + filteredFields := make([]*StructFieldCode, 0, len(fields)) + for _, field := range fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.fields = c.filteredDuplicatedFields(structCode.fields, duplicatedFieldMap) + if len(structCode.fields) > 0 { + filteredFields = append(filteredFields, field) + } + continue + } + } + if _, exists := duplicatedFieldMap[field]; exists { + continue + } + filteredFields = append(filteredFields, field) + } + return filteredFields +} + +func (c *Compiler) isTaggedKeyOnly(fields []*StructFieldCode) bool { + var taggedKeyFieldCount int + for _, field := range fields { + if field.isTaggedKey { + taggedKeyFieldCount++ + } + } + return taggedKeyFieldCount == 1 +} + +func (c *Compiler) typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalJSON() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalJSONType(typ) +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalText() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalTextType(typ) +} + +func (c *Compiler) implementsMarshalJSON(typ *runtime.Type) bool { + if !c.implementsMarshalJSONType(typ) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !c.implementsMarshalJSONType(typ.Elem()) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) implementsMarshalText(typ *runtime.Type) bool { + if !typ.Implements(marshalTextType) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !typ.Elem().Implements(marshalTextType) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) isNilableType(typ *runtime.Type) bool { + if !runtime.IfaceIndir(typ) { + return true + } + switch typ.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Func: + return true + default: + return false + } +} + +func (c *Compiler) implementsMarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(marshalJSONType) || 
typ.Implements(marshalJSONContextType) +} + +func (c *Compiler) isPtrMarshalJSONType(typ *runtime.Type) bool { + return !c.implementsMarshalJSONType(typ) && c.implementsMarshalJSONType(runtime.PtrTo(typ)) +} + +func (c *Compiler) isPtrMarshalTextType(typ *runtime.Type) bool { + return !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType) +} + +func (c *Compiler) codeToOpcode(ctx *compileContext, typ *runtime.Type, code Code) *Opcode { + codes := code.ToOpcode(ctx) + codes.Last().Next = newEndOp(ctx, typ) + c.linkRecursiveCode(ctx) + return codes.First() +} + +func (c *Compiler) linkRecursiveCode(ctx *compileContext) { + recursiveCodes := map[uintptr]*CompiledCode{} + for _, recursive := range *ctx.recursiveCodes { + typeptr := uintptr(unsafe.Pointer(recursive.Type)) + codes := ctx.structTypeToCodes[typeptr] + if recursiveCode, ok := recursiveCodes[typeptr]; ok { + *recursive.Jmp = *recursiveCode + continue + } + + code := copyOpcode(codes.First()) + code.Op = code.Op.PtrHeadToHead() + lastCode := newEndOp(&compileContext{}, recursive.Type) + lastCode.Op = OpRecursiveEnd + + // OpRecursiveEnd must set before call TotalLength + code.End.Next = lastCode + + totalLength := code.TotalLength() + + // Idx, ElemIdx, Length must set after call TotalLength + lastCode.Idx = uint32((totalLength + 1) * uintptrSize) + lastCode.ElemIdx = lastCode.Idx + uintptrSize + lastCode.Length = lastCode.Idx + 2*uintptrSize + + // extend length to alloc slot for elemIdx + length + curTotalLength := uintptr(recursive.TotalLength()) + 3 + nextTotalLength := uintptr(totalLength) + 3 + + compiled := recursive.Jmp + compiled.Code = code + compiled.CurLen = curTotalLength + compiled.NextLen = nextTotalLength + compiled.Linked = true + + recursiveCodes[typeptr] = compiled + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go new file mode 100644 index 0000000..20c93cb --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -0,0 +1,32 @@ +//go:build !race +// +build !race + +package encoder + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + return filtered, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + cachedOpcodeSets[index] = codeSet + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go new file mode 100644 index 0000000..13ba23f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -0,0 +1,45 @@ +//go:build race +// +build race + +package encoder + +import ( + "sync" +) + +var setsMu sync.RWMutex + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := 
compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + setsMu.RLock() + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + setsMu.RUnlock() + return nil, err + } + setsMu.RUnlock() + return filtered, nil + } + setsMu.RUnlock() + + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + setsMu.Lock() + cachedOpcodeSets[index] = codeSet + setsMu.Unlock() + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/context.go b/vendor/github.com/goccy/go-json/internal/encoder/context.go new file mode 100644 index 0000000..3833d0c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/context.go @@ -0,0 +1,105 @@ +package encoder + +import ( + "context" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type compileContext struct { + opcodeIndex uint32 + ptrIndex int + indent uint32 + escapeKey bool + structTypeToCodes map[uintptr]Opcodes + recursiveCodes *Opcodes +} + +func (c *compileContext) incIndent() { + c.indent++ +} + +func (c *compileContext) decIndent() { + c.indent-- +} + +func (c *compileContext) incIndex() { + c.incOpcodeIndex() + c.incPtrIndex() +} + +func (c *compileContext) decIndex() { + c.decOpcodeIndex() + c.decPtrIndex() +} + +func (c *compileContext) incOpcodeIndex() { + c.opcodeIndex++ +} + +func (c *compileContext) decOpcodeIndex() { + c.opcodeIndex-- +} + +func (c *compileContext) incPtrIndex() { + c.ptrIndex++ +} + +func (c *compileContext) decPtrIndex() { + c.ptrIndex-- +} + +const ( + bufSize = 1024 +) + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Buf: make([]byte, 0, bufSize), + Ptrs: make([]uintptr, 128), + KeepRefs: make([]unsafe.Pointer, 0, 8), + Option: &Option{}, + } + }, + } +) + +type RuntimeContext struct { + Context context.Context + Buf []byte + MarshalBuf []byte + Ptrs []uintptr + KeepRefs []unsafe.Pointer + SeenPtr []uintptr + BaseIndent uint32 + Prefix []byte + IndentStr []byte + Option *Option +} + +func (c *RuntimeContext) Init(p uintptr, codelen int) { + if len(c.Ptrs) < codelen { + c.Ptrs = make([]uintptr, codelen) + } + c.Ptrs[0] = p + c.KeepRefs = c.KeepRefs[:0] + c.SeenPtr = c.SeenPtr[:0] + c.BaseIndent = 0 +} + +func (c *RuntimeContext) Ptr() uintptr { + header := (*runtime.SliceHeader)(unsafe.Pointer(&c.Ptrs)) + return uintptr(header.Data) +} + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go new file mode 100644 index 0000000..35c959d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go @@ -0,0 +1,126 @@ +package encoder + +import "unicode/utf8" + +const ( + // The default lowest and highest continuation byte. + locb = 128 //0b10000000 + hicb = 191 //0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. 
The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) + +// first is information about the first byte in a UTF-8 sequence. +var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + lineSep = byte(168) //'\u2028' + paragraphSep = byte(169) //'\u2029' +) + +type decodeRuneState int + +const ( + validUTF8State decodeRuneState = iota + runeErrorState + lineSepState + paragraphSepState +) + +func decodeRuneInString(s string) (decodeRuneState, int) { + n := len(s) + s0 := s[0] + x := first[s0] + if x >= as { + // The following code simulates an additional check for x == xx and + // handling the ASCII and invalid cases accordingly. This mask-and-or + // approach prevents an additional branch. + mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF. 
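+ // mask is all ones for the invalid marker xx (low bit 1) and all zeros for ASCII as (low bit 0), so the expression below picks utf8.RuneError without branching.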
+ if rune(s[0])&^mask|utf8.RuneError&mask == utf8.RuneError { + return runeErrorState, 1 + } + return validUTF8State, 1 + } + sz := int(x & 7) + if n < sz { + return runeErrorState, 1 + } + s1 := s[1] + switch x >> 4 { + case 0: + if s1 < locb || hicb < s1 { + return runeErrorState, 1 + } + case 1: + if s1 < 0xA0 || hicb < s1 { + return runeErrorState, 1 + } + case 2: + if s1 < locb || 0x9F < s1 { + return runeErrorState, 1 + } + case 3: + if s1 < 0x90 || hicb < s1 { + return runeErrorState, 1 + } + case 4: + if s1 < locb || 0x8F < s1 { + return runeErrorState, 1 + } + } + if sz <= 2 { + return validUTF8State, 2 + } + s2 := s[2] + if s2 < locb || hicb < s2 { + return runeErrorState, 1 + } + if sz <= 3 { + // separator character prefixes: [2]byte{226, 128} + if s0 == 226 && s1 == 128 { + switch s2 { + case lineSep: + return lineSepState, 3 + case paragraphSep: + return paragraphSepState, 3 + } + } + return validUTF8State, 3 + } + s3 := s[3] + if s3 < locb || hicb < s3 { + return runeErrorState, 1 + } + return validUTF8State, 4 +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go new file mode 100644 index 0000000..14eb6a0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -0,0 +1,596 @@ +package encoder + +import ( + "bytes" + "encoding" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +func (t OpType) IsMultipleOpHead() bool { + switch t { + case OpStructHead: + return true + case OpStructHeadSlice: + return true + case OpStructHeadArray: + return true + case OpStructHeadMap: + return true + case OpStructHeadStruct: + return true + case OpStructHeadOmitEmpty: + return true + case OpStructHeadOmitEmptySlice: + return true + case OpStructHeadOmitEmptyArray: + return true + case OpStructHeadOmitEmptyMap: + return true + case OpStructHeadOmitEmptyStruct: + return true + case OpStructHeadSlicePtr: + return true + case OpStructHeadOmitEmptySlicePtr: + return true + case OpStructHeadArrayPtr: + return true + case OpStructHeadOmitEmptyArrayPtr: + return true + case OpStructHeadMapPtr: + return true + case OpStructHeadOmitEmptyMapPtr: + return true + } + return false +} + +func (t OpType) IsMultipleOpField() bool { + switch t { + case OpStructField: + return true + case OpStructFieldSlice: + return true + case OpStructFieldArray: + return true + case OpStructFieldMap: + return true + case OpStructFieldStruct: + return true + case OpStructFieldOmitEmpty: + return true + case OpStructFieldOmitEmptySlice: + return true + case OpStructFieldOmitEmptyArray: + return true + case OpStructFieldOmitEmptyMap: + return true + case OpStructFieldOmitEmptyStruct: + return true + case OpStructFieldSlicePtr: + return true + case OpStructFieldOmitEmptySlicePtr: + return true + case OpStructFieldArrayPtr: + return true + case OpStructFieldOmitEmptyArrayPtr: + return true + case OpStructFieldMapPtr: + return true + case OpStructFieldOmitEmptyMapPtr: + return true + } + return false +} + +type OpcodeSet struct { + Type *runtime.Type + NoescapeKeyCode *Opcode + EscapeKeyCode *Opcode + InterfaceNoescapeKeyCode *Opcode + InterfaceEscapeKeyCode *Opcode + CodeLength int + EndCode *Opcode + Code Code + QueryCache map[string]*OpcodeSet + cacheMu sync.RWMutex +} + +func (s *OpcodeSet) getQueryCache(hash string) *OpcodeSet { + s.cacheMu.RLock() + codeSet 
:= s.QueryCache[hash] + s.cacheMu.RUnlock() + return codeSet +} + +func (s *OpcodeSet) setQueryCache(hash string, codeSet *OpcodeSet) { + s.cacheMu.Lock() + s.QueryCache[hash] = codeSet + s.cacheMu.Unlock() +} + +type CompiledCode struct { + Code *Opcode + Linked bool // whether recursive code already have linked + CurLen uintptr + NextLen uintptr +} + +const StartDetectingCyclesAfter = 1000 + +func Load(base uintptr, idx uintptr) uintptr { + addr := base + idx + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func Store(base uintptr, idx uintptr, p uintptr) { + addr := base + idx + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func LoadNPtr(base uintptr, idx uintptr, ptrNum int) uintptr { + addr := base + idx + p := **(**uintptr)(unsafe.Pointer(&addr)) + if p == 0 { + return 0 + } + return PtrToPtr(p) + /* + for i := 0; i < ptrNum; i++ { + if p == 0 { + return p + } + p = PtrToPtr(p) + } + return p + */ +} + +func PtrToUint64(p uintptr) uint64 { return **(**uint64)(unsafe.Pointer(&p)) } +func PtrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func PtrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func PtrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func PtrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func PtrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func PtrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func PtrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func PtrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func PtrToNPtr(p uintptr, ptrNum int) uintptr { + for i := 0; i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = PtrToPtr(p) + } + return p +} + +func PtrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func PtrToInterface(code *Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func ErrUnsupportedValue(code *Opcode, ptr uintptr) *errors.UnsupportedValueError { + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&ptr)), + })) + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: fmt.Sprintf("encountered a cycle via %s", code.Type), + } +} + +func ErrUnsupportedFloat(v float64) *errors.UnsupportedValueError { + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: strconv.FormatFloat(v, 'g', -1, 64), + } +} + +func ErrMarshalerWithCode(code *Opcode, err error) *errors.MarshalerError { + return &errors.MarshalerError{ + Type: runtime.RType2Type(code.Type), + Err: err, + } +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type MapItem struct { + Key []byte + Value []byte +} + +type Mapslice struct { + Items []MapItem +} + +func (m *Mapslice) Len() int { + return len(m.Items) +} + +func (m *Mapslice) Less(i, j int) bool { + return bytes.Compare(m.Items[i].Key, m.Items[j].Key) < 0 +} + +func (m *Mapslice) Swap(i, j int) { + m.Items[i], m.Items[j] = m.Items[j], m.Items[i] +} + +//nolint:structcheck,unused +type mapIter struct { + key unsafe.Pointer + elem unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow unsafe.Pointer + oldoverflow unsafe.Pointer + startBucket uintptr + offset 
uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +type MapContext struct { + Start int + First int + Idx int + Slice *Mapslice + Buf []byte + Len int + Iter mapIter +} + +var mapContextPool = sync.Pool{ + New: func() interface{} { + return &MapContext{ + Slice: &Mapslice{}, + } + }, +} + +func NewMapContext(mapLen int, unorderedMap bool) *MapContext { + ctx := mapContextPool.Get().(*MapContext) + if !unorderedMap { + if len(ctx.Slice.Items) < mapLen { + ctx.Slice.Items = make([]MapItem, mapLen) + } else { + ctx.Slice.Items = ctx.Slice.Items[:mapLen] + } + } + ctx.Buf = ctx.Buf[:0] + ctx.Iter = mapIter{} + ctx.Idx = 0 + ctx.Len = mapLen + return ctx +} + +func ReleaseMapContext(c *MapContext) { + mapContextPool.Put(c) +} + +//go:linkname MapIterInit runtime.mapiterinit +//go:noescape +func MapIterInit(mapType *runtime.Type, m unsafe.Pointer, it *mapIter) + +//go:linkname MapIterKey reflect.mapiterkey +//go:noescape +func MapIterKey(it *mapIter) unsafe.Pointer + +//go:linkname MapIterNext reflect.mapiternext +//go:noescape +func MapIterNext(it *mapIter) + +//go:linkname MapLen reflect.maplen +//go:noescape +func MapLen(m unsafe.Pointer) int + +func AppendByteSlice(_ *RuntimeContext, b []byte, src []byte) []byte { + if src == nil { + return append(b, `null`...) + } + encodedLen := base64.StdEncoding.EncodedLen(len(src)) + b = append(b, '"') + pos := len(b) + remainLen := cap(b[pos:]) + var buf []byte + if remainLen > encodedLen { + buf = b[pos : pos+encodedLen] + } else { + buf = make([]byte, encodedLen) + } + base64.StdEncoding.Encode(buf, src) + return append(append(b, buf...), '"') +} + +func AppendFloat32(_ *RuntimeContext, b []byte, v float32) []byte { + f64 := float64(v) + abs := math.Abs(f64) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + f32 := float32(abs) + if f32 < 1e-6 || f32 >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, f64, fmt, -1, 32) +} + +func AppendFloat64(_ *RuntimeContext, b []byte, v float64) []byte { + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, v, fmt, -1, 64) +} + +func AppendBool(_ *RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } +) + +func AppendNumber(_ *RuntimeContext, b []byte, n json.Number) ([]byte, error) { + if len(n) == 0 { + return append(b, '0'), nil + } + for i := 0; i < len(n); i++ { + if !floatTable[n[i]] { + return nil, fmt.Errorf("json: invalid number literal %q", n) + } + } + b = append(b, n...) 
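+ // Every byte of n was checked against floatTable above, so the literal is appended unmodified.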
+ return b, nil +} + +func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + stdctx := ctx.Option.Context + if ctx.Option.Flag&FieldQueryOption != 0 { + stdctx = SetFieldQueryToContext(stdctx, code.FieldQuery) + } + b, err := marshaler.MarshalJSON(stdctx) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + compactedBuf, err := compact(b, marshalBuf, (ctx.Option.Flag&HTMLEscapeOption) != 0) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return compactedBuf, nil +} + +func AppendMarshalJSONIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON(ctx.Option.Context) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + indentedBuf, err := doIndent( + b, + marshalBuf, + string(ctx.Prefix)+strings.Repeat(string(ctx.IndentStr), int(ctx.BaseIndent+code.Indent)), + string(ctx.IndentStr), + (ctx.Option.Flag&HTMLEscapeOption) != 0, + ) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return indentedBuf, nil +} + +func AppendMarshalText(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendMarshalTextIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := 
reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendNull(_ *RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func AppendComma(_ *RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func AppendCommaIndent(_ *RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func AppendStructEnd(_ *RuntimeContext, b []byte) []byte { + return append(b, '}', ',') +} + +func AppendStructEndIndent(ctx *RuntimeContext, code *Opcode, b []byte) []byte { + b = append(b, '\n') + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + code.Indent - 1 + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return append(b, '}', ',', '\n') +} + +func AppendIndent(ctx *RuntimeContext, b []byte, indent uint32) []byte { + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + indent + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return b +} + +func IsNilForMarshaler(v interface{}) bool { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Bool: + return !rv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(rv.Float()) == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Func: + return rv.IsNil() + case reflect.Slice: + return rv.IsNil() || rv.Len() == 0 + case reflect.String: + return rv.Len() == 0 + } + return false +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/indent.go b/vendor/github.com/goccy/go-json/internal/encoder/indent.go new file mode 100644 index 0000000..dfe04b5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/indent.go @@ -0,0 +1,211 @@ +package encoder + +import ( + "bytes" + "fmt" + + "github.com/goccy/go-json/internal/errors" +) + +func takeIndentSrcRuntimeContext(src []byte) (*RuntimeContext, []byte) { + ctx := TakeRuntimeContext() + buf := ctx.Buf[:0] + buf = append(append(buf, src...), nul) + ctx.Buf = buf + return ctx, buf +} + +func Indent(buf *bytes.Buffer, src []byte, prefix, indentStr string) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + + srcCtx, srcBuf := takeIndentSrcRuntimeContext(src) + dstCtx := TakeRuntimeContext() + dst := dstCtx.Buf[:0] + + dst, err := indentAndWrite(buf, dst, srcBuf, prefix, indentStr) + if err != nil { + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return err + } + dstCtx.Buf = dst + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return nil +} + +func indentAndWrite(buf *bytes.Buffer, dst []byte, src []byte, prefix, indentStr string) ([]byte, error) { + dst, err := doIndent(dst, src, prefix, indentStr, false) + if err != nil { + return nil, err + } + if _, err := buf.Write(dst); err != nil { + return nil, err + } + return dst, nil +} + +func doIndent(dst, src 
[]byte, prefix, indentStr string, escape bool) ([]byte, error) { + buf, cursor, err := indentValue(dst, src, 0, 0, []byte(prefix), []byte(indentStr), escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func indentValue( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return indentObject(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return indentArray(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 'f': + return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func indentObject( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) + } + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key", src[cursor]), + cursor+1, + ) + } + dst = append(dst, ':', ' ') + dst, cursor, err = indentValue(dst, src, indentNum, cursor+1, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key:value pair", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} + +func indentArray( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) 
+ } + dst, cursor, err = indentValue(dst, src, indentNum, cursor, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after array value", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/int.go b/vendor/github.com/goccy/go-json/internal/encoder/int.go new file mode 100644 index 0000000..8b5febe --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/int.go @@ -0,0 +1,176 @@ +// This files's processing codes are inspired by https://github.com/segmentio/encoding. +// The license notation is as follows. +// +// # MIT License +// +// Copyright (c) 2019 Segment.io, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+package encoder + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func numMask(numBitSize uint8) uint64 { + return 1<<numBitSize - 1 +} + +func AppendInt(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte { + var u64 uint64 + switch code.NumBitSize { + case 8: + u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + u64 = **(**uint64)(unsafe.Pointer(&p)) + } + mask := numMask(code.NumBitSize) + n := u64 & mask + negative := (u64>>(code.NumBitSize-1))&1 == 1 + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n & mask + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...)
+} + +func AppendUint(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte { + var u64 uint64 + switch code.NumBitSize { + case 8: + u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + u64 = **(**uint64)(unsafe.Pointer(&p)) + } + mask := numMask(code.NumBitSize) + n := u64 & mask + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + return append(out, b[i:]...) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map112.go b/vendor/github.com/goccy/go-json/internal/encoder/map112.go new file mode 100644 index 0000000..e96ffad --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map112.go @@ -0,0 +1,9 @@ +//go:build !go1.13 +// +build !go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapitervalue +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map113.go b/vendor/github.com/goccy/go-json/internal/encoder/map113.go new file mode 100644 index 0000000..9b69dcc --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map113.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapiterelem +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go new file mode 100644 index 0000000..df22f55 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go @@ -0,0 +1,752 @@ +package encoder + +import ( + "bytes" + "fmt" + "sort" + "strings" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +type OpFlags uint16 + +const ( + AnonymousHeadFlags OpFlags = 1 << 0 + AnonymousKeyFlags OpFlags = 1 << 1 + IndirectFlags OpFlags = 1 << 2 + IsTaggedKeyFlags OpFlags = 1 << 3 + NilCheckFlags OpFlags = 1 << 4 + AddrForMarshalerFlags OpFlags = 1 << 5 + IsNextOpPtrTypeFlags OpFlags = 1 << 6 + IsNilableTypeFlags OpFlags = 1 << 7 + MarshalerContextFlags OpFlags = 1 << 8 + NonEmptyInterfaceFlags OpFlags = 1 << 9 +) + +type Opcode struct { + Op OpType // operation type + Idx uint32 // offset to access ptr + Next *Opcode // next opcode + End *Opcode // array/slice/struct/map end + NextField *Opcode // next struct field + Key string // struct field key + Offset uint32 // offset size from struct header + PtrNum uint8 // pointer number: e.g. double pointer is 2. 
+ NumBitSize uint8 + Flags OpFlags + + Type *runtime.Type // go type + Jmp *CompiledCode // for recursive call + FieldQuery *FieldQuery // field query for Interface / MarshalJSON / MarshalText + ElemIdx uint32 // offset to access array/slice elem + Length uint32 // offset to access slice length or array length + Indent uint32 // indent number + Size uint32 // array/slice elem size + DisplayIdx uint32 // opcode index + DisplayKey string // key text to display +} + +func (c *Opcode) Validate() error { + var prevIdx uint32 + for code := c; !code.IsEnd(); { + if prevIdx != 0 { + if code.DisplayIdx != prevIdx+1 { + return fmt.Errorf( + "invalid index. previous display index is %d but next is %d. dump = %s", + prevIdx, code.DisplayIdx, c.Dump(), + ) + } + } + prevIdx = code.DisplayIdx + code = code.IterNext() + } + return nil +} + +func (c *Opcode) IterNext() *Opcode { + if c == nil { + return nil + } + switch c.Op.CodeType() { + case CodeArrayElem, CodeSliceElem, CodeMapKey: + return c.End + default: + return c.Next + } +} + +func (c *Opcode) IsEnd() bool { + if c == nil { + return true + } + return c.Op == OpEnd || c.Op == OpInterfaceEnd || c.Op == OpRecursiveEnd +} + +func (c *Opcode) MaxIdx() uint32 { + max := uint32(0) + for _, value := range []uint32{ + c.Idx, + c.ElemIdx, + c.Length, + c.Size, + } { + if max < value { + max = value + } + } + return max +} + +func (c *Opcode) ToHeaderType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructHeadIntString + } + return OpStructHeadInt + case OpIntPtr: + if isString { + return OpStructHeadIntPtrString + } + return OpStructHeadIntPtr + case OpUint: + if isString { + return OpStructHeadUintString + } + return OpStructHeadUint + case OpUintPtr: + if isString { + return OpStructHeadUintPtrString + } + return OpStructHeadUintPtr + case OpFloat32: + if isString { + return OpStructHeadFloat32String + } + return OpStructHeadFloat32 + case OpFloat32Ptr: + if isString { + return OpStructHeadFloat32PtrString + } + return OpStructHeadFloat32Ptr + case OpFloat64: + if isString { + return OpStructHeadFloat64String + } + return OpStructHeadFloat64 + case OpFloat64Ptr: + if isString { + return OpStructHeadFloat64PtrString + } + return OpStructHeadFloat64Ptr + case OpString: + if isString { + return OpStructHeadStringString + } + return OpStructHeadString + case OpStringPtr: + if isString { + return OpStructHeadStringPtrString + } + return OpStructHeadStringPtr + case OpNumber: + if isString { + return OpStructHeadNumberString + } + return OpStructHeadNumber + case OpNumberPtr: + if isString { + return OpStructHeadNumberPtrString + } + return OpStructHeadNumberPtr + case OpBool: + if isString { + return OpStructHeadBoolString + } + return OpStructHeadBool + case OpBoolPtr: + if isString { + return OpStructHeadBoolPtrString + } + return OpStructHeadBoolPtr + case OpBytes: + return OpStructHeadBytes + case OpBytesPtr: + return OpStructHeadBytesPtr + case OpMap: + return OpStructHeadMap + case OpMapPtr: + c.Op = OpMap + return OpStructHeadMapPtr + case OpArray: + return OpStructHeadArray + case OpArrayPtr: + c.Op = OpArray + return OpStructHeadArrayPtr + case OpSlice: + return OpStructHeadSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructHeadSlicePtr + case OpMarshalJSON: + return OpStructHeadMarshalJSON + case OpMarshalJSONPtr: + return OpStructHeadMarshalJSONPtr + case OpMarshalText: + return OpStructHeadMarshalText + case OpMarshalTextPtr: + return OpStructHeadMarshalTextPtr + } + return OpStructHead +} + +func (c 
*Opcode) ToFieldType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructFieldIntString + } + return OpStructFieldInt + case OpIntPtr: + if isString { + return OpStructFieldIntPtrString + } + return OpStructFieldIntPtr + case OpUint: + if isString { + return OpStructFieldUintString + } + return OpStructFieldUint + case OpUintPtr: + if isString { + return OpStructFieldUintPtrString + } + return OpStructFieldUintPtr + case OpFloat32: + if isString { + return OpStructFieldFloat32String + } + return OpStructFieldFloat32 + case OpFloat32Ptr: + if isString { + return OpStructFieldFloat32PtrString + } + return OpStructFieldFloat32Ptr + case OpFloat64: + if isString { + return OpStructFieldFloat64String + } + return OpStructFieldFloat64 + case OpFloat64Ptr: + if isString { + return OpStructFieldFloat64PtrString + } + return OpStructFieldFloat64Ptr + case OpString: + if isString { + return OpStructFieldStringString + } + return OpStructFieldString + case OpStringPtr: + if isString { + return OpStructFieldStringPtrString + } + return OpStructFieldStringPtr + case OpNumber: + if isString { + return OpStructFieldNumberString + } + return OpStructFieldNumber + case OpNumberPtr: + if isString { + return OpStructFieldNumberPtrString + } + return OpStructFieldNumberPtr + case OpBool: + if isString { + return OpStructFieldBoolString + } + return OpStructFieldBool + case OpBoolPtr: + if isString { + return OpStructFieldBoolPtrString + } + return OpStructFieldBoolPtr + case OpBytes: + return OpStructFieldBytes + case OpBytesPtr: + return OpStructFieldBytesPtr + case OpMap: + return OpStructFieldMap + case OpMapPtr: + c.Op = OpMap + return OpStructFieldMapPtr + case OpArray: + return OpStructFieldArray + case OpArrayPtr: + c.Op = OpArray + return OpStructFieldArrayPtr + case OpSlice: + return OpStructFieldSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructFieldSlicePtr + case OpMarshalJSON: + return OpStructFieldMarshalJSON + case OpMarshalJSONPtr: + return OpStructFieldMarshalJSONPtr + case OpMarshalText: + return OpStructFieldMarshalText + case OpMarshalTextPtr: + return OpStructFieldMarshalTextPtr + } + return OpStructField +} + +func newOpCode(ctx *compileContext, typ *runtime.Type, op OpType) *Opcode { + return newOpCodeWithNext(ctx, typ, op, newEndOp(ctx, typ)) +} + +func opcodeOffset(idx int) uint32 { + return uint32(idx) * uintptrSize +} + +func getCodeAddrByIdx(head *Opcode, idx uint32) *Opcode { + addr := uintptr(unsafe.Pointer(head)) + uintptr(idx)*unsafe.Sizeof(Opcode{}) + return *(**Opcode)(unsafe.Pointer(&addr)) +} + +func copyOpcode(code *Opcode) *Opcode { + codeNum := ToEndCode(code).DisplayIdx + 1 + codeSlice := make([]Opcode, codeNum) + head := (*Opcode)((*runtime.SliceHeader)(unsafe.Pointer(&codeSlice)).Data) + ptr := head + c := code + for { + *ptr = Opcode{ + Op: c.Op, + Key: c.Key, + PtrNum: c.PtrNum, + NumBitSize: c.NumBitSize, + Flags: c.Flags, + Idx: c.Idx, + Offset: c.Offset, + Type: c.Type, + FieldQuery: c.FieldQuery, + DisplayIdx: c.DisplayIdx, + DisplayKey: c.DisplayKey, + ElemIdx: c.ElemIdx, + Length: c.Length, + Size: c.Size, + Indent: c.Indent, + Jmp: c.Jmp, + } + if c.End != nil { + ptr.End = getCodeAddrByIdx(head, c.End.DisplayIdx) + } + if c.NextField != nil { + ptr.NextField = getCodeAddrByIdx(head, c.NextField.DisplayIdx) + } + if c.Next != nil { + ptr.Next = getCodeAddrByIdx(head, c.Next.DisplayIdx) + } + if c.IsEnd() { + break + } + ptr = getCodeAddrByIdx(head, c.DisplayIdx+1) + c = c.IterNext() + } + return head +} + +func 
setTotalLengthToInterfaceOp(code *Opcode) { + for c := code; !c.IsEnd(); { + if c.Op == OpInterface || c.Op == OpInterfacePtr { + c.Length = uint32(code.TotalLength()) + } + c = c.IterNext() + } +} + +func ToEndCode(code *Opcode) *Opcode { + c := code + for !c.IsEnd() { + c = c.IterNext() + } + return c +} + +func copyToInterfaceOpcode(code *Opcode) *Opcode { + copied := copyOpcode(code) + c := copied + c = ToEndCode(c) + c.Idx += uintptrSize + c.ElemIdx = c.Idx + uintptrSize + c.Length = c.Idx + 2*uintptrSize + c.Op = OpInterfaceEnd + return copied +} + +func newOpCodeWithNext(ctx *compileContext, typ *runtime.Type, op OpType, next *Opcode) *Opcode { + return &Opcode{ + Op: op, + Idx: opcodeOffset(ctx.ptrIndex), + Next: next, + Type: typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newEndOp(ctx *compileContext, typ *runtime.Type) *Opcode { + return newOpCodeWithNext(ctx, typ, OpEnd, nil) +} + +func (c *Opcode) TotalLength() int { + var idx int + code := c + for !code.IsEnd() { + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + if code.Op == OpRecursiveEnd { + break + } + code = code.IterNext() + } + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + return idx + 1 +} + +func (c *Opcode) dumpHead(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayHead { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + ) +} + +func (c *Opcode) dumpMapHead(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpMapEnd(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpElem(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayElem { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d][size:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + code.Size, + ) +} + +func (c *Opcode) dumpField(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][key:%s][offset:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.DisplayKey, + code.Offset, + ) +} + +func (c *Opcode) dumpKey(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpValue(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) Dump() string { + codes := []string{} + for code := c; !code.IsEnd(); { + switch code.Op.CodeType() { + case CodeSliceHead: + codes = append(codes, c.dumpHead(code)) + code = code.Next + case CodeMapHead: + codes = append(codes, c.dumpMapHead(code)) + code = code.Next + case CodeArrayElem, CodeSliceElem: + codes = append(codes, c.dumpElem(code)) + code 
= code.End + case CodeMapKey: + codes = append(codes, c.dumpKey(code)) + code = code.End + case CodeMapValue: + codes = append(codes, c.dumpValue(code)) + code = code.Next + case CodeMapEnd: + codes = append(codes, c.dumpMapEnd(code)) + code = code.Next + case CodeStructField: + codes = append(codes, c.dumpField(code)) + code = code.Next + case CodeStructEnd: + codes = append(codes, c.dumpField(code)) + code = code.Next + default: + codes = append(codes, fmt.Sprintf( + "[%03d]%s%s ([idx:%d])", + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + )) + code = code.Next + } + } + return strings.Join(codes, "\n") +} + +func (c *Opcode) DumpDOT() string { + type edge struct { + from, to *Opcode + label string + weight int + } + var edges []edge + + b := &bytes.Buffer{} + fmt.Fprintf(b, "digraph \"%p\" {\n", c.Type) + fmt.Fprintln(b, "mclimit=1.5;\nrankdir=TD;\nordering=out;\nnode[shape=box];") + for code := c; !code.IsEnd(); { + label := code.Op.String() + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, label) + if p := code.Next; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "Next", + weight: 10, + }) + } + if p := code.NextField; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "NextField", + weight: 2, + }) + } + if p := code.End; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "End", + weight: 1, + }) + } + if p := code.Jmp; p != nil { + edges = append(edges, edge{ + from: code, + to: p.Code, + label: "Jmp", + weight: 1, + }) + } + + switch code.Op.CodeType() { + case CodeSliceHead: + code = code.Next + case CodeMapHead: + code = code.Next + case CodeArrayElem, CodeSliceElem: + code = code.End + case CodeMapKey: + code = code.End + case CodeMapValue: + code = code.Next + case CodeMapEnd: + code = code.Next + case CodeStructField: + code = code.Next + case CodeStructEnd: + code = code.Next + default: + code = code.Next + } + if code.IsEnd() { + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, code.Op.String()) + } + } + sort.Slice(edges, func(i, j int) bool { + return edges[i].to.DisplayIdx < edges[j].to.DisplayIdx + }) + for _, e := range edges { + fmt.Fprintf(b, "\"%p\" -> \"%p\" [label=%q][weight=%d];\n", e.from, e.to, e.label, e.weight) + } + fmt.Fprint(b, "}") + return b.String() +} + +func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + length := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpSlice, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Length: length, + Indent: ctx.indent, + } +} + +func newSliceElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, size uintptr) *Opcode { + return &Opcode{ + Op: OpSliceElem, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: head.Length, + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newArrayHeaderCode(ctx *compileContext, typ *runtime.Type, alen int) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpArray, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Indent: ctx.indent, + Length: uint32(alen), + } +} + +func newArrayElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, length int, size uintptr) *Opcode { + return &Opcode{ + Op: OpArrayElem, + Type: typ, + Idx: head.Idx, + 
DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: uint32(length), + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newMapHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + return &Opcode{ + Op: OpMap, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapKeyCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapKey, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapValueCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapValue, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapEndCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapEnd, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Next: newEndOp(ctx, typ), + } +} + +func newRecursiveCode(ctx *compileContext, typ *runtime.Type, jmp *CompiledCode) *Opcode { + return &Opcode{ + Op: OpRecursive, + Type: typ, + Idx: opcodeOffset(ctx.ptrIndex), + Next: newEndOp(ctx, typ), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Jmp: jmp, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/option.go b/vendor/github.com/goccy/go-json/internal/encoder/option.go new file mode 100644 index 0000000..12c58e4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/option.go @@ -0,0 +1,48 @@ +package encoder + +import ( + "context" + "io" +) + +type OptionFlag uint8 + +const ( + HTMLEscapeOption OptionFlag = 1 << iota + IndentOption + UnorderedMapOption + DebugOption + ColorizeOption + ContextOption + NormalizeUTF8Option + FieldQueryOption +) + +type Option struct { + Flag OptionFlag + ColorScheme *ColorScheme + Context context.Context + DebugOut io.Writer + DebugDOTOut io.WriteCloser +} + +type EncodeFormat struct { + Header string + Footer string +} + +type EncodeFormatScheme struct { + Int EncodeFormat + Uint EncodeFormat + Float EncodeFormat + Bool EncodeFormat + String EncodeFormat + Binary EncodeFormat + ObjectKey EncodeFormat + Null EncodeFormat +} + +type ( + ColorScheme = EncodeFormatScheme + ColorFormat = EncodeFormat +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/optype.go b/vendor/github.com/goccy/go-json/internal/encoder/optype.go new file mode 100644 index 0000000..5c1241b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/optype.go @@ -0,0 +1,932 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package encoder + +import ( + "strings" +) + +type CodeType int + +const ( + CodeOp CodeType = 0 + CodeArrayHead CodeType = 1 + CodeArrayElem CodeType = 2 + CodeSliceHead CodeType = 3 + CodeSliceElem CodeType = 4 + CodeMapHead CodeType = 5 + CodeMapKey CodeType = 6 + CodeMapValue CodeType = 7 + CodeMapEnd CodeType = 8 + CodeRecursive CodeType = 9 + CodeStructField CodeType = 10 + CodeStructEnd CodeType = 11 +) + +var opTypeStrings = [400]string{ + "End", + "Interface", + "Ptr", + "SliceElem", + "SliceEnd", + "ArrayElem", + "ArrayEnd", + "MapKey", + "MapValue", + "MapEnd", + "Recursive", + "RecursivePtr", + "RecursiveEnd", + "InterfaceEnd", + "Int", + "Uint", + "Float32", + "Float64", + "Bool", + "String", + "Bytes", + "Number", + "Array", + "Map", + "Slice", + "Struct", + "MarshalJSON", + "MarshalText", + "IntString", + "UintString", + "Float32String", + "Float64String", + "BoolString", + "StringString", + "NumberString", + "IntPtr", + "UintPtr", + "Float32Ptr", + "Float64Ptr", + "BoolPtr", + "StringPtr", + "BytesPtr", + "NumberPtr", + "ArrayPtr", + "MapPtr", + "SlicePtr", + "MarshalJSONPtr", + "MarshalTextPtr", + "InterfacePtr", + "IntPtrString", + "UintPtrString", + "Float32PtrString", + "Float64PtrString", + "BoolPtrString", + "StringPtrString", + "NumberPtrString", + "StructHeadInt", + "StructHeadOmitEmptyInt", + "StructPtrHeadInt", + "StructPtrHeadOmitEmptyInt", + "StructHeadUint", + "StructHeadOmitEmptyUint", + "StructPtrHeadUint", + "StructPtrHeadOmitEmptyUint", + "StructHeadFloat32", + "StructHeadOmitEmptyFloat32", + "StructPtrHeadFloat32", + "StructPtrHeadOmitEmptyFloat32", + "StructHeadFloat64", + "StructHeadOmitEmptyFloat64", + "StructPtrHeadFloat64", + "StructPtrHeadOmitEmptyFloat64", + "StructHeadBool", + "StructHeadOmitEmptyBool", + "StructPtrHeadBool", + "StructPtrHeadOmitEmptyBool", + "StructHeadString", + "StructHeadOmitEmptyString", + "StructPtrHeadString", + "StructPtrHeadOmitEmptyString", + "StructHeadBytes", + "StructHeadOmitEmptyBytes", + "StructPtrHeadBytes", + "StructPtrHeadOmitEmptyBytes", + "StructHeadNumber", + "StructHeadOmitEmptyNumber", + "StructPtrHeadNumber", + "StructPtrHeadOmitEmptyNumber", + "StructHeadArray", + "StructHeadOmitEmptyArray", + "StructPtrHeadArray", + "StructPtrHeadOmitEmptyArray", + "StructHeadMap", + "StructHeadOmitEmptyMap", + "StructPtrHeadMap", + "StructPtrHeadOmitEmptyMap", + "StructHeadSlice", + "StructHeadOmitEmptySlice", + "StructPtrHeadSlice", + "StructPtrHeadOmitEmptySlice", + "StructHeadStruct", + "StructHeadOmitEmptyStruct", + "StructPtrHeadStruct", + "StructPtrHeadOmitEmptyStruct", + "StructHeadMarshalJSON", + "StructHeadOmitEmptyMarshalJSON", + "StructPtrHeadMarshalJSON", + "StructPtrHeadOmitEmptyMarshalJSON", + "StructHeadMarshalText", + "StructHeadOmitEmptyMarshalText", + "StructPtrHeadMarshalText", + "StructPtrHeadOmitEmptyMarshalText", + "StructHeadIntString", + "StructHeadOmitEmptyIntString", + "StructPtrHeadIntString", + "StructPtrHeadOmitEmptyIntString", + "StructHeadUintString", + "StructHeadOmitEmptyUintString", + "StructPtrHeadUintString", + "StructPtrHeadOmitEmptyUintString", + "StructHeadFloat32String", + "StructHeadOmitEmptyFloat32String", + "StructPtrHeadFloat32String", + "StructPtrHeadOmitEmptyFloat32String", + "StructHeadFloat64String", + "StructHeadOmitEmptyFloat64String", + "StructPtrHeadFloat64String", + "StructPtrHeadOmitEmptyFloat64String", + "StructHeadBoolString", + "StructHeadOmitEmptyBoolString", + "StructPtrHeadBoolString", + "StructPtrHeadOmitEmptyBoolString", + "StructHeadStringString", + 
"StructHeadOmitEmptyStringString", + "StructPtrHeadStringString", + "StructPtrHeadOmitEmptyStringString", + "StructHeadNumberString", + "StructHeadOmitEmptyNumberString", + "StructPtrHeadNumberString", + "StructPtrHeadOmitEmptyNumberString", + "StructHeadIntPtr", + "StructHeadOmitEmptyIntPtr", + "StructPtrHeadIntPtr", + "StructPtrHeadOmitEmptyIntPtr", + "StructHeadUintPtr", + "StructHeadOmitEmptyUintPtr", + "StructPtrHeadUintPtr", + "StructPtrHeadOmitEmptyUintPtr", + "StructHeadFloat32Ptr", + "StructHeadOmitEmptyFloat32Ptr", + "StructPtrHeadFloat32Ptr", + "StructPtrHeadOmitEmptyFloat32Ptr", + "StructHeadFloat64Ptr", + "StructHeadOmitEmptyFloat64Ptr", + "StructPtrHeadFloat64Ptr", + "StructPtrHeadOmitEmptyFloat64Ptr", + "StructHeadBoolPtr", + "StructHeadOmitEmptyBoolPtr", + "StructPtrHeadBoolPtr", + "StructPtrHeadOmitEmptyBoolPtr", + "StructHeadStringPtr", + "StructHeadOmitEmptyStringPtr", + "StructPtrHeadStringPtr", + "StructPtrHeadOmitEmptyStringPtr", + "StructHeadBytesPtr", + "StructHeadOmitEmptyBytesPtr", + "StructPtrHeadBytesPtr", + "StructPtrHeadOmitEmptyBytesPtr", + "StructHeadNumberPtr", + "StructHeadOmitEmptyNumberPtr", + "StructPtrHeadNumberPtr", + "StructPtrHeadOmitEmptyNumberPtr", + "StructHeadArrayPtr", + "StructHeadOmitEmptyArrayPtr", + "StructPtrHeadArrayPtr", + "StructPtrHeadOmitEmptyArrayPtr", + "StructHeadMapPtr", + "StructHeadOmitEmptyMapPtr", + "StructPtrHeadMapPtr", + "StructPtrHeadOmitEmptyMapPtr", + "StructHeadSlicePtr", + "StructHeadOmitEmptySlicePtr", + "StructPtrHeadSlicePtr", + "StructPtrHeadOmitEmptySlicePtr", + "StructHeadMarshalJSONPtr", + "StructHeadOmitEmptyMarshalJSONPtr", + "StructPtrHeadMarshalJSONPtr", + "StructPtrHeadOmitEmptyMarshalJSONPtr", + "StructHeadMarshalTextPtr", + "StructHeadOmitEmptyMarshalTextPtr", + "StructPtrHeadMarshalTextPtr", + "StructPtrHeadOmitEmptyMarshalTextPtr", + "StructHeadInterfacePtr", + "StructHeadOmitEmptyInterfacePtr", + "StructPtrHeadInterfacePtr", + "StructPtrHeadOmitEmptyInterfacePtr", + "StructHeadIntPtrString", + "StructHeadOmitEmptyIntPtrString", + "StructPtrHeadIntPtrString", + "StructPtrHeadOmitEmptyIntPtrString", + "StructHeadUintPtrString", + "StructHeadOmitEmptyUintPtrString", + "StructPtrHeadUintPtrString", + "StructPtrHeadOmitEmptyUintPtrString", + "StructHeadFloat32PtrString", + "StructHeadOmitEmptyFloat32PtrString", + "StructPtrHeadFloat32PtrString", + "StructPtrHeadOmitEmptyFloat32PtrString", + "StructHeadFloat64PtrString", + "StructHeadOmitEmptyFloat64PtrString", + "StructPtrHeadFloat64PtrString", + "StructPtrHeadOmitEmptyFloat64PtrString", + "StructHeadBoolPtrString", + "StructHeadOmitEmptyBoolPtrString", + "StructPtrHeadBoolPtrString", + "StructPtrHeadOmitEmptyBoolPtrString", + "StructHeadStringPtrString", + "StructHeadOmitEmptyStringPtrString", + "StructPtrHeadStringPtrString", + "StructPtrHeadOmitEmptyStringPtrString", + "StructHeadNumberPtrString", + "StructHeadOmitEmptyNumberPtrString", + "StructPtrHeadNumberPtrString", + "StructPtrHeadOmitEmptyNumberPtrString", + "StructHead", + "StructHeadOmitEmpty", + "StructPtrHead", + "StructPtrHeadOmitEmpty", + "StructFieldInt", + "StructFieldOmitEmptyInt", + "StructEndInt", + "StructEndOmitEmptyInt", + "StructFieldUint", + "StructFieldOmitEmptyUint", + "StructEndUint", + "StructEndOmitEmptyUint", + "StructFieldFloat32", + "StructFieldOmitEmptyFloat32", + "StructEndFloat32", + "StructEndOmitEmptyFloat32", + "StructFieldFloat64", + "StructFieldOmitEmptyFloat64", + "StructEndFloat64", + "StructEndOmitEmptyFloat64", + "StructFieldBool", + "StructFieldOmitEmptyBool", + 
"StructEndBool", + "StructEndOmitEmptyBool", + "StructFieldString", + "StructFieldOmitEmptyString", + "StructEndString", + "StructEndOmitEmptyString", + "StructFieldBytes", + "StructFieldOmitEmptyBytes", + "StructEndBytes", + "StructEndOmitEmptyBytes", + "StructFieldNumber", + "StructFieldOmitEmptyNumber", + "StructEndNumber", + "StructEndOmitEmptyNumber", + "StructFieldArray", + "StructFieldOmitEmptyArray", + "StructEndArray", + "StructEndOmitEmptyArray", + "StructFieldMap", + "StructFieldOmitEmptyMap", + "StructEndMap", + "StructEndOmitEmptyMap", + "StructFieldSlice", + "StructFieldOmitEmptySlice", + "StructEndSlice", + "StructEndOmitEmptySlice", + "StructFieldStruct", + "StructFieldOmitEmptyStruct", + "StructEndStruct", + "StructEndOmitEmptyStruct", + "StructFieldMarshalJSON", + "StructFieldOmitEmptyMarshalJSON", + "StructEndMarshalJSON", + "StructEndOmitEmptyMarshalJSON", + "StructFieldMarshalText", + "StructFieldOmitEmptyMarshalText", + "StructEndMarshalText", + "StructEndOmitEmptyMarshalText", + "StructFieldIntString", + "StructFieldOmitEmptyIntString", + "StructEndIntString", + "StructEndOmitEmptyIntString", + "StructFieldUintString", + "StructFieldOmitEmptyUintString", + "StructEndUintString", + "StructEndOmitEmptyUintString", + "StructFieldFloat32String", + "StructFieldOmitEmptyFloat32String", + "StructEndFloat32String", + "StructEndOmitEmptyFloat32String", + "StructFieldFloat64String", + "StructFieldOmitEmptyFloat64String", + "StructEndFloat64String", + "StructEndOmitEmptyFloat64String", + "StructFieldBoolString", + "StructFieldOmitEmptyBoolString", + "StructEndBoolString", + "StructEndOmitEmptyBoolString", + "StructFieldStringString", + "StructFieldOmitEmptyStringString", + "StructEndStringString", + "StructEndOmitEmptyStringString", + "StructFieldNumberString", + "StructFieldOmitEmptyNumberString", + "StructEndNumberString", + "StructEndOmitEmptyNumberString", + "StructFieldIntPtr", + "StructFieldOmitEmptyIntPtr", + "StructEndIntPtr", + "StructEndOmitEmptyIntPtr", + "StructFieldUintPtr", + "StructFieldOmitEmptyUintPtr", + "StructEndUintPtr", + "StructEndOmitEmptyUintPtr", + "StructFieldFloat32Ptr", + "StructFieldOmitEmptyFloat32Ptr", + "StructEndFloat32Ptr", + "StructEndOmitEmptyFloat32Ptr", + "StructFieldFloat64Ptr", + "StructFieldOmitEmptyFloat64Ptr", + "StructEndFloat64Ptr", + "StructEndOmitEmptyFloat64Ptr", + "StructFieldBoolPtr", + "StructFieldOmitEmptyBoolPtr", + "StructEndBoolPtr", + "StructEndOmitEmptyBoolPtr", + "StructFieldStringPtr", + "StructFieldOmitEmptyStringPtr", + "StructEndStringPtr", + "StructEndOmitEmptyStringPtr", + "StructFieldBytesPtr", + "StructFieldOmitEmptyBytesPtr", + "StructEndBytesPtr", + "StructEndOmitEmptyBytesPtr", + "StructFieldNumberPtr", + "StructFieldOmitEmptyNumberPtr", + "StructEndNumberPtr", + "StructEndOmitEmptyNumberPtr", + "StructFieldArrayPtr", + "StructFieldOmitEmptyArrayPtr", + "StructEndArrayPtr", + "StructEndOmitEmptyArrayPtr", + "StructFieldMapPtr", + "StructFieldOmitEmptyMapPtr", + "StructEndMapPtr", + "StructEndOmitEmptyMapPtr", + "StructFieldSlicePtr", + "StructFieldOmitEmptySlicePtr", + "StructEndSlicePtr", + "StructEndOmitEmptySlicePtr", + "StructFieldMarshalJSONPtr", + "StructFieldOmitEmptyMarshalJSONPtr", + "StructEndMarshalJSONPtr", + "StructEndOmitEmptyMarshalJSONPtr", + "StructFieldMarshalTextPtr", + "StructFieldOmitEmptyMarshalTextPtr", + "StructEndMarshalTextPtr", + "StructEndOmitEmptyMarshalTextPtr", + "StructFieldInterfacePtr", + "StructFieldOmitEmptyInterfacePtr", + "StructEndInterfacePtr", + 
"StructEndOmitEmptyInterfacePtr", + "StructFieldIntPtrString", + "StructFieldOmitEmptyIntPtrString", + "StructEndIntPtrString", + "StructEndOmitEmptyIntPtrString", + "StructFieldUintPtrString", + "StructFieldOmitEmptyUintPtrString", + "StructEndUintPtrString", + "StructEndOmitEmptyUintPtrString", + "StructFieldFloat32PtrString", + "StructFieldOmitEmptyFloat32PtrString", + "StructEndFloat32PtrString", + "StructEndOmitEmptyFloat32PtrString", + "StructFieldFloat64PtrString", + "StructFieldOmitEmptyFloat64PtrString", + "StructEndFloat64PtrString", + "StructEndOmitEmptyFloat64PtrString", + "StructFieldBoolPtrString", + "StructFieldOmitEmptyBoolPtrString", + "StructEndBoolPtrString", + "StructEndOmitEmptyBoolPtrString", + "StructFieldStringPtrString", + "StructFieldOmitEmptyStringPtrString", + "StructEndStringPtrString", + "StructEndOmitEmptyStringPtrString", + "StructFieldNumberPtrString", + "StructFieldOmitEmptyNumberPtrString", + "StructEndNumberPtrString", + "StructEndOmitEmptyNumberPtrString", + "StructField", + "StructFieldOmitEmpty", + "StructEnd", + "StructEndOmitEmpty", +} + +type OpType uint16 + +const ( + OpEnd OpType = 0 + OpInterface OpType = 1 + OpPtr OpType = 2 + OpSliceElem OpType = 3 + OpSliceEnd OpType = 4 + OpArrayElem OpType = 5 + OpArrayEnd OpType = 6 + OpMapKey OpType = 7 + OpMapValue OpType = 8 + OpMapEnd OpType = 9 + OpRecursive OpType = 10 + OpRecursivePtr OpType = 11 + OpRecursiveEnd OpType = 12 + OpInterfaceEnd OpType = 13 + OpInt OpType = 14 + OpUint OpType = 15 + OpFloat32 OpType = 16 + OpFloat64 OpType = 17 + OpBool OpType = 18 + OpString OpType = 19 + OpBytes OpType = 20 + OpNumber OpType = 21 + OpArray OpType = 22 + OpMap OpType = 23 + OpSlice OpType = 24 + OpStruct OpType = 25 + OpMarshalJSON OpType = 26 + OpMarshalText OpType = 27 + OpIntString OpType = 28 + OpUintString OpType = 29 + OpFloat32String OpType = 30 + OpFloat64String OpType = 31 + OpBoolString OpType = 32 + OpStringString OpType = 33 + OpNumberString OpType = 34 + OpIntPtr OpType = 35 + OpUintPtr OpType = 36 + OpFloat32Ptr OpType = 37 + OpFloat64Ptr OpType = 38 + OpBoolPtr OpType = 39 + OpStringPtr OpType = 40 + OpBytesPtr OpType = 41 + OpNumberPtr OpType = 42 + OpArrayPtr OpType = 43 + OpMapPtr OpType = 44 + OpSlicePtr OpType = 45 + OpMarshalJSONPtr OpType = 46 + OpMarshalTextPtr OpType = 47 + OpInterfacePtr OpType = 48 + OpIntPtrString OpType = 49 + OpUintPtrString OpType = 50 + OpFloat32PtrString OpType = 51 + OpFloat64PtrString OpType = 52 + OpBoolPtrString OpType = 53 + OpStringPtrString OpType = 54 + OpNumberPtrString OpType = 55 + OpStructHeadInt OpType = 56 + OpStructHeadOmitEmptyInt OpType = 57 + OpStructPtrHeadInt OpType = 58 + OpStructPtrHeadOmitEmptyInt OpType = 59 + OpStructHeadUint OpType = 60 + OpStructHeadOmitEmptyUint OpType = 61 + OpStructPtrHeadUint OpType = 62 + OpStructPtrHeadOmitEmptyUint OpType = 63 + OpStructHeadFloat32 OpType = 64 + OpStructHeadOmitEmptyFloat32 OpType = 65 + OpStructPtrHeadFloat32 OpType = 66 + OpStructPtrHeadOmitEmptyFloat32 OpType = 67 + OpStructHeadFloat64 OpType = 68 + OpStructHeadOmitEmptyFloat64 OpType = 69 + OpStructPtrHeadFloat64 OpType = 70 + OpStructPtrHeadOmitEmptyFloat64 OpType = 71 + OpStructHeadBool OpType = 72 + OpStructHeadOmitEmptyBool OpType = 73 + OpStructPtrHeadBool OpType = 74 + OpStructPtrHeadOmitEmptyBool OpType = 75 + OpStructHeadString OpType = 76 + OpStructHeadOmitEmptyString OpType = 77 + OpStructPtrHeadString OpType = 78 + OpStructPtrHeadOmitEmptyString OpType = 79 + OpStructHeadBytes OpType = 80 + OpStructHeadOmitEmptyBytes 
OpType = 81 + OpStructPtrHeadBytes OpType = 82 + OpStructPtrHeadOmitEmptyBytes OpType = 83 + OpStructHeadNumber OpType = 84 + OpStructHeadOmitEmptyNumber OpType = 85 + OpStructPtrHeadNumber OpType = 86 + OpStructPtrHeadOmitEmptyNumber OpType = 87 + OpStructHeadArray OpType = 88 + OpStructHeadOmitEmptyArray OpType = 89 + OpStructPtrHeadArray OpType = 90 + OpStructPtrHeadOmitEmptyArray OpType = 91 + OpStructHeadMap OpType = 92 + OpStructHeadOmitEmptyMap OpType = 93 + OpStructPtrHeadMap OpType = 94 + OpStructPtrHeadOmitEmptyMap OpType = 95 + OpStructHeadSlice OpType = 96 + OpStructHeadOmitEmptySlice OpType = 97 + OpStructPtrHeadSlice OpType = 98 + OpStructPtrHeadOmitEmptySlice OpType = 99 + OpStructHeadStruct OpType = 100 + OpStructHeadOmitEmptyStruct OpType = 101 + OpStructPtrHeadStruct OpType = 102 + OpStructPtrHeadOmitEmptyStruct OpType = 103 + OpStructHeadMarshalJSON OpType = 104 + OpStructHeadOmitEmptyMarshalJSON OpType = 105 + OpStructPtrHeadMarshalJSON OpType = 106 + OpStructPtrHeadOmitEmptyMarshalJSON OpType = 107 + OpStructHeadMarshalText OpType = 108 + OpStructHeadOmitEmptyMarshalText OpType = 109 + OpStructPtrHeadMarshalText OpType = 110 + OpStructPtrHeadOmitEmptyMarshalText OpType = 111 + OpStructHeadIntString OpType = 112 + OpStructHeadOmitEmptyIntString OpType = 113 + OpStructPtrHeadIntString OpType = 114 + OpStructPtrHeadOmitEmptyIntString OpType = 115 + OpStructHeadUintString OpType = 116 + OpStructHeadOmitEmptyUintString OpType = 117 + OpStructPtrHeadUintString OpType = 118 + OpStructPtrHeadOmitEmptyUintString OpType = 119 + OpStructHeadFloat32String OpType = 120 + OpStructHeadOmitEmptyFloat32String OpType = 121 + OpStructPtrHeadFloat32String OpType = 122 + OpStructPtrHeadOmitEmptyFloat32String OpType = 123 + OpStructHeadFloat64String OpType = 124 + OpStructHeadOmitEmptyFloat64String OpType = 125 + OpStructPtrHeadFloat64String OpType = 126 + OpStructPtrHeadOmitEmptyFloat64String OpType = 127 + OpStructHeadBoolString OpType = 128 + OpStructHeadOmitEmptyBoolString OpType = 129 + OpStructPtrHeadBoolString OpType = 130 + OpStructPtrHeadOmitEmptyBoolString OpType = 131 + OpStructHeadStringString OpType = 132 + OpStructHeadOmitEmptyStringString OpType = 133 + OpStructPtrHeadStringString OpType = 134 + OpStructPtrHeadOmitEmptyStringString OpType = 135 + OpStructHeadNumberString OpType = 136 + OpStructHeadOmitEmptyNumberString OpType = 137 + OpStructPtrHeadNumberString OpType = 138 + OpStructPtrHeadOmitEmptyNumberString OpType = 139 + OpStructHeadIntPtr OpType = 140 + OpStructHeadOmitEmptyIntPtr OpType = 141 + OpStructPtrHeadIntPtr OpType = 142 + OpStructPtrHeadOmitEmptyIntPtr OpType = 143 + OpStructHeadUintPtr OpType = 144 + OpStructHeadOmitEmptyUintPtr OpType = 145 + OpStructPtrHeadUintPtr OpType = 146 + OpStructPtrHeadOmitEmptyUintPtr OpType = 147 + OpStructHeadFloat32Ptr OpType = 148 + OpStructHeadOmitEmptyFloat32Ptr OpType = 149 + OpStructPtrHeadFloat32Ptr OpType = 150 + OpStructPtrHeadOmitEmptyFloat32Ptr OpType = 151 + OpStructHeadFloat64Ptr OpType = 152 + OpStructHeadOmitEmptyFloat64Ptr OpType = 153 + OpStructPtrHeadFloat64Ptr OpType = 154 + OpStructPtrHeadOmitEmptyFloat64Ptr OpType = 155 + OpStructHeadBoolPtr OpType = 156 + OpStructHeadOmitEmptyBoolPtr OpType = 157 + OpStructPtrHeadBoolPtr OpType = 158 + OpStructPtrHeadOmitEmptyBoolPtr OpType = 159 + OpStructHeadStringPtr OpType = 160 + OpStructHeadOmitEmptyStringPtr OpType = 161 + OpStructPtrHeadStringPtr OpType = 162 + OpStructPtrHeadOmitEmptyStringPtr OpType = 163 + OpStructHeadBytesPtr OpType = 164 + 
OpStructHeadOmitEmptyBytesPtr OpType = 165 + OpStructPtrHeadBytesPtr OpType = 166 + OpStructPtrHeadOmitEmptyBytesPtr OpType = 167 + OpStructHeadNumberPtr OpType = 168 + OpStructHeadOmitEmptyNumberPtr OpType = 169 + OpStructPtrHeadNumberPtr OpType = 170 + OpStructPtrHeadOmitEmptyNumberPtr OpType = 171 + OpStructHeadArrayPtr OpType = 172 + OpStructHeadOmitEmptyArrayPtr OpType = 173 + OpStructPtrHeadArrayPtr OpType = 174 + OpStructPtrHeadOmitEmptyArrayPtr OpType = 175 + OpStructHeadMapPtr OpType = 176 + OpStructHeadOmitEmptyMapPtr OpType = 177 + OpStructPtrHeadMapPtr OpType = 178 + OpStructPtrHeadOmitEmptyMapPtr OpType = 179 + OpStructHeadSlicePtr OpType = 180 + OpStructHeadOmitEmptySlicePtr OpType = 181 + OpStructPtrHeadSlicePtr OpType = 182 + OpStructPtrHeadOmitEmptySlicePtr OpType = 183 + OpStructHeadMarshalJSONPtr OpType = 184 + OpStructHeadOmitEmptyMarshalJSONPtr OpType = 185 + OpStructPtrHeadMarshalJSONPtr OpType = 186 + OpStructPtrHeadOmitEmptyMarshalJSONPtr OpType = 187 + OpStructHeadMarshalTextPtr OpType = 188 + OpStructHeadOmitEmptyMarshalTextPtr OpType = 189 + OpStructPtrHeadMarshalTextPtr OpType = 190 + OpStructPtrHeadOmitEmptyMarshalTextPtr OpType = 191 + OpStructHeadInterfacePtr OpType = 192 + OpStructHeadOmitEmptyInterfacePtr OpType = 193 + OpStructPtrHeadInterfacePtr OpType = 194 + OpStructPtrHeadOmitEmptyInterfacePtr OpType = 195 + OpStructHeadIntPtrString OpType = 196 + OpStructHeadOmitEmptyIntPtrString OpType = 197 + OpStructPtrHeadIntPtrString OpType = 198 + OpStructPtrHeadOmitEmptyIntPtrString OpType = 199 + OpStructHeadUintPtrString OpType = 200 + OpStructHeadOmitEmptyUintPtrString OpType = 201 + OpStructPtrHeadUintPtrString OpType = 202 + OpStructPtrHeadOmitEmptyUintPtrString OpType = 203 + OpStructHeadFloat32PtrString OpType = 204 + OpStructHeadOmitEmptyFloat32PtrString OpType = 205 + OpStructPtrHeadFloat32PtrString OpType = 206 + OpStructPtrHeadOmitEmptyFloat32PtrString OpType = 207 + OpStructHeadFloat64PtrString OpType = 208 + OpStructHeadOmitEmptyFloat64PtrString OpType = 209 + OpStructPtrHeadFloat64PtrString OpType = 210 + OpStructPtrHeadOmitEmptyFloat64PtrString OpType = 211 + OpStructHeadBoolPtrString OpType = 212 + OpStructHeadOmitEmptyBoolPtrString OpType = 213 + OpStructPtrHeadBoolPtrString OpType = 214 + OpStructPtrHeadOmitEmptyBoolPtrString OpType = 215 + OpStructHeadStringPtrString OpType = 216 + OpStructHeadOmitEmptyStringPtrString OpType = 217 + OpStructPtrHeadStringPtrString OpType = 218 + OpStructPtrHeadOmitEmptyStringPtrString OpType = 219 + OpStructHeadNumberPtrString OpType = 220 + OpStructHeadOmitEmptyNumberPtrString OpType = 221 + OpStructPtrHeadNumberPtrString OpType = 222 + OpStructPtrHeadOmitEmptyNumberPtrString OpType = 223 + OpStructHead OpType = 224 + OpStructHeadOmitEmpty OpType = 225 + OpStructPtrHead OpType = 226 + OpStructPtrHeadOmitEmpty OpType = 227 + OpStructFieldInt OpType = 228 + OpStructFieldOmitEmptyInt OpType = 229 + OpStructEndInt OpType = 230 + OpStructEndOmitEmptyInt OpType = 231 + OpStructFieldUint OpType = 232 + OpStructFieldOmitEmptyUint OpType = 233 + OpStructEndUint OpType = 234 + OpStructEndOmitEmptyUint OpType = 235 + OpStructFieldFloat32 OpType = 236 + OpStructFieldOmitEmptyFloat32 OpType = 237 + OpStructEndFloat32 OpType = 238 + OpStructEndOmitEmptyFloat32 OpType = 239 + OpStructFieldFloat64 OpType = 240 + OpStructFieldOmitEmptyFloat64 OpType = 241 + OpStructEndFloat64 OpType = 242 + OpStructEndOmitEmptyFloat64 OpType = 243 + OpStructFieldBool OpType = 244 + OpStructFieldOmitEmptyBool OpType = 245 + OpStructEndBool 
OpType = 246 + OpStructEndOmitEmptyBool OpType = 247 + OpStructFieldString OpType = 248 + OpStructFieldOmitEmptyString OpType = 249 + OpStructEndString OpType = 250 + OpStructEndOmitEmptyString OpType = 251 + OpStructFieldBytes OpType = 252 + OpStructFieldOmitEmptyBytes OpType = 253 + OpStructEndBytes OpType = 254 + OpStructEndOmitEmptyBytes OpType = 255 + OpStructFieldNumber OpType = 256 + OpStructFieldOmitEmptyNumber OpType = 257 + OpStructEndNumber OpType = 258 + OpStructEndOmitEmptyNumber OpType = 259 + OpStructFieldArray OpType = 260 + OpStructFieldOmitEmptyArray OpType = 261 + OpStructEndArray OpType = 262 + OpStructEndOmitEmptyArray OpType = 263 + OpStructFieldMap OpType = 264 + OpStructFieldOmitEmptyMap OpType = 265 + OpStructEndMap OpType = 266 + OpStructEndOmitEmptyMap OpType = 267 + OpStructFieldSlice OpType = 268 + OpStructFieldOmitEmptySlice OpType = 269 + OpStructEndSlice OpType = 270 + OpStructEndOmitEmptySlice OpType = 271 + OpStructFieldStruct OpType = 272 + OpStructFieldOmitEmptyStruct OpType = 273 + OpStructEndStruct OpType = 274 + OpStructEndOmitEmptyStruct OpType = 275 + OpStructFieldMarshalJSON OpType = 276 + OpStructFieldOmitEmptyMarshalJSON OpType = 277 + OpStructEndMarshalJSON OpType = 278 + OpStructEndOmitEmptyMarshalJSON OpType = 279 + OpStructFieldMarshalText OpType = 280 + OpStructFieldOmitEmptyMarshalText OpType = 281 + OpStructEndMarshalText OpType = 282 + OpStructEndOmitEmptyMarshalText OpType = 283 + OpStructFieldIntString OpType = 284 + OpStructFieldOmitEmptyIntString OpType = 285 + OpStructEndIntString OpType = 286 + OpStructEndOmitEmptyIntString OpType = 287 + OpStructFieldUintString OpType = 288 + OpStructFieldOmitEmptyUintString OpType = 289 + OpStructEndUintString OpType = 290 + OpStructEndOmitEmptyUintString OpType = 291 + OpStructFieldFloat32String OpType = 292 + OpStructFieldOmitEmptyFloat32String OpType = 293 + OpStructEndFloat32String OpType = 294 + OpStructEndOmitEmptyFloat32String OpType = 295 + OpStructFieldFloat64String OpType = 296 + OpStructFieldOmitEmptyFloat64String OpType = 297 + OpStructEndFloat64String OpType = 298 + OpStructEndOmitEmptyFloat64String OpType = 299 + OpStructFieldBoolString OpType = 300 + OpStructFieldOmitEmptyBoolString OpType = 301 + OpStructEndBoolString OpType = 302 + OpStructEndOmitEmptyBoolString OpType = 303 + OpStructFieldStringString OpType = 304 + OpStructFieldOmitEmptyStringString OpType = 305 + OpStructEndStringString OpType = 306 + OpStructEndOmitEmptyStringString OpType = 307 + OpStructFieldNumberString OpType = 308 + OpStructFieldOmitEmptyNumberString OpType = 309 + OpStructEndNumberString OpType = 310 + OpStructEndOmitEmptyNumberString OpType = 311 + OpStructFieldIntPtr OpType = 312 + OpStructFieldOmitEmptyIntPtr OpType = 313 + OpStructEndIntPtr OpType = 314 + OpStructEndOmitEmptyIntPtr OpType = 315 + OpStructFieldUintPtr OpType = 316 + OpStructFieldOmitEmptyUintPtr OpType = 317 + OpStructEndUintPtr OpType = 318 + OpStructEndOmitEmptyUintPtr OpType = 319 + OpStructFieldFloat32Ptr OpType = 320 + OpStructFieldOmitEmptyFloat32Ptr OpType = 321 + OpStructEndFloat32Ptr OpType = 322 + OpStructEndOmitEmptyFloat32Ptr OpType = 323 + OpStructFieldFloat64Ptr OpType = 324 + OpStructFieldOmitEmptyFloat64Ptr OpType = 325 + OpStructEndFloat64Ptr OpType = 326 + OpStructEndOmitEmptyFloat64Ptr OpType = 327 + OpStructFieldBoolPtr OpType = 328 + OpStructFieldOmitEmptyBoolPtr OpType = 329 + OpStructEndBoolPtr OpType = 330 + OpStructEndOmitEmptyBoolPtr OpType = 331 + OpStructFieldStringPtr OpType = 332 + 
OpStructFieldOmitEmptyStringPtr OpType = 333 + OpStructEndStringPtr OpType = 334 + OpStructEndOmitEmptyStringPtr OpType = 335 + OpStructFieldBytesPtr OpType = 336 + OpStructFieldOmitEmptyBytesPtr OpType = 337 + OpStructEndBytesPtr OpType = 338 + OpStructEndOmitEmptyBytesPtr OpType = 339 + OpStructFieldNumberPtr OpType = 340 + OpStructFieldOmitEmptyNumberPtr OpType = 341 + OpStructEndNumberPtr OpType = 342 + OpStructEndOmitEmptyNumberPtr OpType = 343 + OpStructFieldArrayPtr OpType = 344 + OpStructFieldOmitEmptyArrayPtr OpType = 345 + OpStructEndArrayPtr OpType = 346 + OpStructEndOmitEmptyArrayPtr OpType = 347 + OpStructFieldMapPtr OpType = 348 + OpStructFieldOmitEmptyMapPtr OpType = 349 + OpStructEndMapPtr OpType = 350 + OpStructEndOmitEmptyMapPtr OpType = 351 + OpStructFieldSlicePtr OpType = 352 + OpStructFieldOmitEmptySlicePtr OpType = 353 + OpStructEndSlicePtr OpType = 354 + OpStructEndOmitEmptySlicePtr OpType = 355 + OpStructFieldMarshalJSONPtr OpType = 356 + OpStructFieldOmitEmptyMarshalJSONPtr OpType = 357 + OpStructEndMarshalJSONPtr OpType = 358 + OpStructEndOmitEmptyMarshalJSONPtr OpType = 359 + OpStructFieldMarshalTextPtr OpType = 360 + OpStructFieldOmitEmptyMarshalTextPtr OpType = 361 + OpStructEndMarshalTextPtr OpType = 362 + OpStructEndOmitEmptyMarshalTextPtr OpType = 363 + OpStructFieldInterfacePtr OpType = 364 + OpStructFieldOmitEmptyInterfacePtr OpType = 365 + OpStructEndInterfacePtr OpType = 366 + OpStructEndOmitEmptyInterfacePtr OpType = 367 + OpStructFieldIntPtrString OpType = 368 + OpStructFieldOmitEmptyIntPtrString OpType = 369 + OpStructEndIntPtrString OpType = 370 + OpStructEndOmitEmptyIntPtrString OpType = 371 + OpStructFieldUintPtrString OpType = 372 + OpStructFieldOmitEmptyUintPtrString OpType = 373 + OpStructEndUintPtrString OpType = 374 + OpStructEndOmitEmptyUintPtrString OpType = 375 + OpStructFieldFloat32PtrString OpType = 376 + OpStructFieldOmitEmptyFloat32PtrString OpType = 377 + OpStructEndFloat32PtrString OpType = 378 + OpStructEndOmitEmptyFloat32PtrString OpType = 379 + OpStructFieldFloat64PtrString OpType = 380 + OpStructFieldOmitEmptyFloat64PtrString OpType = 381 + OpStructEndFloat64PtrString OpType = 382 + OpStructEndOmitEmptyFloat64PtrString OpType = 383 + OpStructFieldBoolPtrString OpType = 384 + OpStructFieldOmitEmptyBoolPtrString OpType = 385 + OpStructEndBoolPtrString OpType = 386 + OpStructEndOmitEmptyBoolPtrString OpType = 387 + OpStructFieldStringPtrString OpType = 388 + OpStructFieldOmitEmptyStringPtrString OpType = 389 + OpStructEndStringPtrString OpType = 390 + OpStructEndOmitEmptyStringPtrString OpType = 391 + OpStructFieldNumberPtrString OpType = 392 + OpStructFieldOmitEmptyNumberPtrString OpType = 393 + OpStructEndNumberPtrString OpType = 394 + OpStructEndOmitEmptyNumberPtrString OpType = 395 + OpStructField OpType = 396 + OpStructFieldOmitEmpty OpType = 397 + OpStructEnd OpType = 398 + OpStructEndOmitEmpty OpType = 399 +) + +func (t OpType) String() string { + if int(t) >= 400 { + return "" + } + return opTypeStrings[int(t)] +} + +func (t OpType) CodeType() CodeType { + if strings.Contains(t.String(), "Struct") { + if strings.Contains(t.String(), "End") { + return CodeStructEnd + } + return CodeStructField + } + switch t { + case OpArray, OpArrayPtr: + return CodeArrayHead + case OpArrayElem: + return CodeArrayElem + case OpSlice, OpSlicePtr: + return CodeSliceHead + case OpSliceElem: + return CodeSliceElem + case OpMap, OpMapPtr: + return CodeMapHead + case OpMapKey: + return CodeMapKey + case OpMapValue: + return CodeMapValue + case 
OpMapEnd: + return CodeMapEnd + } + + return CodeOp +} + +func (t OpType) HeadToPtrHead() OpType { + if strings.Index(t.String(), "PtrHead") > 0 { + return t + } + + idx := strings.Index(t.String(), "Head") + if idx == -1 { + return t + } + suffix := "PtrHead" + t.String()[idx+len("Head"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)+toPtrOffset).String(), suffix) { + return OpType(int(t) + toPtrOffset) + } + return t +} + +func (t OpType) HeadToOmitEmptyHead() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + + return t +} + +func (t OpType) PtrHeadToHead() OpType { + idx := strings.Index(t.String(), "PtrHead") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Ptr"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)-toPtrOffset).String(), suffix) { + return OpType(int(t) - toPtrOffset) + } + return t +} + +func (t OpType) FieldToEnd() OpType { + idx := strings.Index(t.String(), "Field") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Field"):] + if suffix == "" || suffix == "OmitEmpty" { + return t + } + const toEndOffset = 2 + if strings.Contains(OpType(int(t)+toEndOffset).String(), "End"+suffix) { + return OpType(int(t) + toEndOffset) + } + return t +} + +func (t OpType) FieldToOmitEmptyField() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + return t +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/query.go b/vendor/github.com/goccy/go-json/internal/encoder/query.go new file mode 100644 index 0000000..1e1850c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/query.go @@ -0,0 +1,135 @@ +package encoder + +import ( + "context" + "fmt" + "reflect" +) + +var ( + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +) + +type FieldQuery struct { + Name string + Fields []*FieldQuery + hash string +} + +func (q *FieldQuery) Hash() string { + if q.hash != "" { + return q.hash + } + b, _ := Marshal(q) + q.hash = string(b) + return q.hash +} + +func (q *FieldQuery) MarshalJSON() ([]byte, error) { + if q.Name != "" { + if len(q.Fields) > 0 { + return Marshal(map[string][]*FieldQuery{q.Name: q.Fields}) + } + return Marshal(q.Name) + } + return Marshal(q.Fields) +} + +func (q *FieldQuery) QueryString() (FieldQueryString, error) { + b, err := Marshal(q) + if err != nil { + return "", err + } + return FieldQueryString(b), nil +} + +type FieldQueryString string + +func (s FieldQueryString) Build() (*FieldQuery, error) { + var query interface{} + if err := Unmarshal([]byte(s), &query); err != nil { + return nil, err + } + return s.build(reflect.ValueOf(query)) +} + +func (s FieldQueryString) build(v reflect.Value) (*FieldQuery, error) { + switch v.Type().Kind() { + case reflect.String: + return s.buildString(v) + case reflect.Map: + return s.buildMap(v) + case reflect.Slice: + return s.buildSlice(v) + case reflect.Interface: + return s.build(reflect.ValueOf(v.Interface())) + } + return nil, fmt.Errorf("failed to build field query") +} + +func (s FieldQueryString) buildString(v reflect.Value) (*FieldQuery, error) { + b := []byte(v.String()) + switch b[0] { + case '[', '{': + var query interface{} + if err := Unmarshal(b, &query); err != nil { + return nil, err + } + if str, ok := query.(string); ok { + return &FieldQuery{Name: str}, nil + } + 
return s.build(reflect.ValueOf(query)) + } + return &FieldQuery{Name: string(b)}, nil +} + +func (s FieldQueryString) buildSlice(v reflect.Value) (*FieldQuery, error) { + fields := make([]*FieldQuery, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + def, err := s.build(v.Index(i)) + if err != nil { + return nil, err + } + fields = append(fields, def) + } + return &FieldQuery{Fields: fields}, nil +} + +func (s FieldQueryString) buildMap(v reflect.Value) (*FieldQuery, error) { + keys := v.MapKeys() + if len(keys) != 1 { + return nil, fmt.Errorf("failed to build field query object") + } + key := keys[0] + if key.Type().Kind() != reflect.String { + return nil, fmt.Errorf("failed to build field query. invalid object key type") + } + name := key.String() + def, err := s.build(v.MapIndex(key)) + if err != nil { + return nil, err + } + return &FieldQuery{ + Name: name, + Fields: def.Fields, + }, nil +} + +type queryKey struct{} + +func FieldQueryFromContext(ctx context.Context) *FieldQuery { + query := ctx.Value(queryKey{}) + if query == nil { + return nil + } + q, ok := query.(*FieldQuery) + if !ok { + return nil + } + return q +} + +func SetFieldQueryToContext(ctx context.Context, query *FieldQuery) context.Context { + return context.WithValue(ctx, queryKey{}, query) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string.go b/vendor/github.com/goccy/go-json/internal/encoder/string.go new file mode 100644 index 0000000..4abb841 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string.go @@ -0,0 +1,483 @@ +// This files's string processing codes are inspired by https://github.com/segmentio/encoding. +// The license notation is as follows. +// +// # MIT License +// +// Copyright (c) 2019 Segment.io, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+package encoder + +import ( + "math/bits" + "reflect" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +var hex = "0123456789abcdef" + +//nolint:govet +func stringToUint64Slice(s string) []uint64 { + return *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{ + Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + Len: len(s) / 8, + Cap: len(s) / 8, + })) +} + +func AppendString(ctx *RuntimeContext, buf []byte, s string) []byte { + if ctx.Option.Flag&HTMLEscapeOption != 0 { + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedHTMLString(buf, s) + } + return appendHTMLString(buf, s) + } + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedString(buf, s) + } + return appendString(buf, s) +} + +func appendNormalizedHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTMLNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTMLNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) 
+ i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTML[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTML[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} + +func appendNormalizedString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) 
+ buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) + i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscape[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscape[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) 
+ buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string_table.go b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go new file mode 100644 index 0000000..ebe42c9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go @@ -0,0 +1,415 @@ +package encoder + +var needEscapeHTMLNormalizeUTF8 = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeNormalizeUTF8 = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: true, + 
0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeHTML = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} + +var needEscape = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go new file mode 100644 index 0000000..82b6dd4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go @@ -0,0 +1,41 @@ +package vm + +import ( + "fmt" + "io" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + defer func() { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + if wc := ctx.Option.DebugDOTOut; wc != nil { + _, _ = io.WriteString(wc, code.DumpDOT()) + wc.Close() + ctx.Option.DebugDOTOut = nil + } + + if err := recover(); err != nil { + w 
:= ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go new file mode 100644 index 0000000..65252b4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go @@ -0,0 +1,9 @@ +package vm + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. + // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go new file mode 100644 index 0000000..86291d7 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go @@ -0,0 +1,207 @@ +package vm + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key...) + b[len(b)-1] = ':' + return append(b, value...) 
+} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + b[len(b)-1] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalText(ctx, code, b, v) +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(_ *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return append(b, code.Key...) +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go new file mode 100644 index 0000000..645d20f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package vm + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + 
store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go new file mode 100644 index 0000000..925f61e --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go @@ -0,0 +1,35 @@ +package vm_color + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go new file mode 100644 index 0000000..12ec56c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go @@ -0,0 +1,9 @@ +package vm_color + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
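Editorial aside, not part of the vendored patch: DebugRun above wraps the generated encoder VM in a defer/recover that dumps the opcode listing and runtime context to ctx.Option.DebugOut before re-panicking, so a crash inside the VM still surfaces with enough state to diagnose. A minimal standalone sketch of that pattern, with a placeholder run function standing in for the real VM:

package main

import (
	"fmt"
	"os"
)

// run stands in for the real encoder VM; any panic here is what we want to annotate.
func run() []byte {
	panic("unexpected opcode")
}

func debugRun() (out []byte) {
	defer func() {
		if err := recover(); err != nil {
			// Print diagnostics to stderr, then re-panic so callers still fail loudly.
			fmt.Fprintln(os.Stderr, "=============[DEBUG]===============")
			fmt.Fprintf(os.Stderr, "panic: %v\n", err)
			fmt.Fprintln(os.Stderr, "===================================")
			panic(err)
		}
	}()
	return run()
}

func main() {
	defer func() { recover() }() // keep the demo from crashing the process
	debugRun()
}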
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_color_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go new file mode 100644 index 0000000..33f29ae --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go @@ -0,0 +1,274 @@ +package vm_color + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... + } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) 
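Editorial aside, not part of the vendored patch: util.go above defines uintptrSize as 4 << (^uintptr(0) >> 63), the usual Go idiom for the platform word size in bytes. A standalone sketch showing why it evaluates to 8 on 64-bit targets and 4 on 32-bit ones:

package main

import "fmt"

// ^uintptr(0) is all ones at the width of uintptr. On a 64-bit platform the
// shift by 63 leaves 1, so the constant is 4 << 1 == 8; on a 32-bit platform
// the shift leaves 0 and the constant is 4 << 0 == 4.
const uintptrSize = 4 << (^uintptr(0) >> 63)

func main() {
	fmt.Println(uintptrSize) // 8 on 64-bit, 4 on 32-bit
}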
+ b = encoder.AppendInt(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Uint + b = append(b, format.Header...) + b = encoder.AppendUint(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat32(ctx, b, v) + return append(b, format.Footer...) +} + +func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat64(ctx, b, v) + return append(b, format.Footer...) +} + +func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + b = encoder.AppendString(ctx, b, v) + return append(b, format.Footer...) +} + +func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { + format := ctx.Option.ColorScheme.Binary + b = append(b, format.Header...) + b = encoder.AppendByteSlice(ctx, b, src) + return append(b, format.Footer...) +} + +func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + bb, err := encoder.AppendNumber(ctx, b, n) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { + format := ctx.Option.ColorScheme.Bool + b = append(b, format.Header...) + if v { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + return append(b, format.Footer...) +} + +func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(b, format.Footer...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(append(b, format.Footer...), ',') +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key[:len(key)-1]...) + b = append(b, ':') + return append(b, value...) +} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) 
+ bb, err := encoder.AppendMarshalText(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) + + return append(b, ':') +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go new file mode 100644 index 0000000..a63e83e --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
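Editorial aside, not part of the vendored patch: the util.go helpers above wrap every encoded value in the ColorScheme's Header and Footer bytes, and the encoder never tracks whether a field is the first one in an object; each element simply leaves a trailing ',' and appendColon, appendObjectEnd, appendArrayEnd and appendStructEndSkipLast later overwrite that comma with ':', '}' or ']'. A simplified sketch of the comma-overwrite technique (hypothetical closeObject name, illustrative only):

package main

import "fmt"

// closeObject patches the trailing ',' left by the previous element into '}',
// mirroring what appendStructEndSkipLast does in util.go above, and keeps a
// trailing comma of its own for whatever value encloses this object.
func closeObject(b []byte) []byte {
	last := len(b) - 1
	if b[last] == ',' {
		b[last] = '}'
		return append(b, ',')
	}
	return append(b, '}', ',')
}

func main() {
	b := []byte(`{"a":1,"b":2,`) // every element appends a trailing comma
	b = closeObject(b)
	fmt.Println(string(b)) // {"a":1,"b":2},
}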
+package vm_color + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + 
store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpEnd:
+			goto END
+		}
+	}
+END:
+	return b, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go
new file mode 100644
index 0000000..dd4cd48
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go
@@ -0,0 +1,35 @@
+package vm_color_indent
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	defer func() {
+		if err := recover(); err != nil {
+			w := ctx.Option.DebugOut
+			fmt.Fprintln(w, "=============[DEBUG]===============")
+			fmt.Fprintln(w, "* [TYPE]")
+			fmt.Fprintln(w, codeSet.Type)
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [ALL OPCODE]")
+			fmt.Fprintln(w, code.Dump())
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [CONTEXT]")
+			fmt.Fprintf(w, "%+v\n", ctx)
+			fmt.Fprintln(w, "===================================")
+			panic(err)
+		}
+	}()
+
+	return Run(ctx, b, codeSet)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go
new file mode 100644
index 0000000..2395abe
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go
@@ -0,0 +1,297 @@
+package vm_color_indent
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	appendIndent        = encoder.AppendIndent
+	appendStructEnd     = encoder.AppendStructEndIndent
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit         = encoder.MapIterInit
+	mapiterkey          = encoder.MapIterKey
+	mapitervalue        = encoder.MapIterValue
+	mapiternext         = encoder.MapIterNext
+	maplen              = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ  *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+
+func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	b = encoder.AppendInt(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Uint
+	b = append(b, format.Header...)
+	b = encoder.AppendUint(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat32(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat64(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	b = encoder.AppendString(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte {
+	format := ctx.Option.ColorScheme.Binary
+	b = append(b, format.Header...)
+	b = encoder.AppendByteSlice(ctx, b, src)
+	return append(b, format.Footer...)
+}
+
+func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendNumber(ctx, b, n)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte {
+	format := ctx.Option.ColorScheme.Bool
+	b = append(b, format.Header...)
+	if v {
+		b = append(b, "true"...)
+	} else {
+		b = append(b, "false"...)
+	}
+	return append(b, format.Footer...)
+}
+
+func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(b, format.Footer...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',', '\n')
+}
+
+func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(append(b, format.Footer...), ',', '\n')
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b[:len(b)-2], ':', ' ')
+}
+
+func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent+1)
+	b = append(b, key...)
+	b[len(b)-2] = ':'
+	b[len(b)-1] = ' '
+	return append(b, value...)
+}
+
+func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = append(b, '[', '\n')
+	return appendIndent(ctx, b, code.Indent+1)
+}
+
+func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, ']', ',', '\n')
+}
+
+func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '[', ']', ',', '\n')
+}
+
+func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '}', ',', '\n')
+}
+
+func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	// replace comma to newline
+	b[last-1] = '\n'
+	b = appendIndent(ctx, b[:last], code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSONIndent(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendMarshalTextIndent(ctx, code, b, v)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '\n')
+}
+
+func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent)
+
+	format := ctx.Option.ColorScheme.ObjectKey
+	b = append(b, format.Header...)
+	b = append(b, code.Key[:len(code.Key)-1]...)
+	b = append(b, format.Footer...)
+ + return append(b, ':', ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go new file mode 100644 index 0000000..3b4e22e --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package vm_color_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case 
encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go new file mode 100644 index 0000000..9939538 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go new file mode 100644 index 0000000..9e245bf --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go @@ -0,0 +1,9 @@ +package vm_indent + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+	// dependency order: vm => vm_indent => vm_color => vm_color_indent
+	_ "github.com/goccy/go-json/internal/encoder/vm_color"
+)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go
new file mode 100644
index 0000000..6cb745e
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go
@@ -0,0 +1,230 @@
+package vm_indent
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	appendInt = encoder.AppendInt
+	appendUint = encoder.AppendUint
+	appendFloat32 = encoder.AppendFloat32
+	appendFloat64 = encoder.AppendFloat64
+	appendString = encoder.AppendString
+	appendByteSlice = encoder.AppendByteSlice
+	appendNumber = encoder.AppendNumber
+	appendStructEnd = encoder.AppendStructEndIndent
+	appendIndent = encoder.AppendIndent
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit = encoder.MapIterInit
+	mapiterkey = encoder.MapIterKey
+	mapitervalue = encoder.MapIterValue
+	mapiternext = encoder.MapIterNext
+	maplen = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte {
+	if v {
+		return append(b, "true"...)
+	}
+	return append(b, "false"...)
+}
+
+func appendNull(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, "null"...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',', '\n')
+}
+
+func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, "null,\n"...)
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b[:len(b)-2], ':', ' ')
+}
+
+func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent+1)
+	b = append(b, key...)
+	b[len(b)-2] = ':'
+	b[len(b)-1] = ' '
+	return append(b, value...)
+}
+
+func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = append(b, '[', '\n')
+	return appendIndent(ctx, b, code.Indent+1)
+}
+
+func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = b[:len(b)-2]
+	b = append(b, '\n')
+	b = appendIndent(ctx, b, code.Indent)
+	return append(b, ']', ',', '\n')
+}
+
+func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '[', ']', ',', '\n')
+}
+
+func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '}', ',', '\n')
+}
+
+func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	// replace comma to newline
+	b[last-1] = '\n'
+	b = appendIndent(ctx, b[:last], code.Indent)
+	return append(b, '}', ',', '\n')
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSONIndent(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalTextIndent(ctx, code, b, v)
+}
+
+func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, '{', '\n')
+}
+
+func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
+	b = appendIndent(ctx, b, code.Indent)
+	b = append(b, code.Key...)
+ return append(b, ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go new file mode 100644 index 0000000..836c5c8 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package vm_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := 
ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/errors/error.go b/vendor/github.com/goccy/go-json/internal/errors/error.go new file mode 100644 index 0000000..9207d0f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/errors/error.go @@ -0,0 +1,183 @@ +package errors + +import ( + "fmt" + "reflect" + "strconv" +) + +type InvalidUTF8Error struct { + S string // the whole string value that caused the error +} + +func (e *InvalidUTF8Error) Error() string { + return fmt.Sprintf("json: invalid UTF-8 in string: %s", strconv.Quote(e.S)) +} + +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return fmt.Sprintf("json: Unmarshal(non-pointer %s)", e.Type) + } + return fmt.Sprintf("json: Unmarshal(nil %s)", e.Type) +} + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError struct { + Type reflect.Type + Err error + sourceFunc string +} + +func (e *MarshalerError) Error() string { + srcFunc := e.sourceFunc + if srcFunc == "" { + srcFunc = "MarshalJSON" + } + return fmt.Sprintf("json: error calling %s for type %s: %s", srcFunc, e.Type, e.Err.Error()) +} + +// Unwrap returns the underlying error. +func (e *MarshalerError) Unwrap() error { return e.Err } + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError struct { + msg string // description of error + Offset int64 // error occurred after reading Offset bytes +} + +func (e *SyntaxError) Error() string { return e.msg } + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return fmt.Sprintf("json: cannot unmarshal object key %s into unexported field %s of type %s", + strconv.Quote(e.Key), e.Field.Name, e.Type.String(), + ) +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return fmt.Sprintf("json: cannot unmarshal %s into Go struct field %s.%s of type %s", + e.Value, e.Struct, e.Field, e.Type, + ) + } + return fmt.Sprintf("json: cannot unmarshal %s into Go value of type %s", e.Value, e.Type) +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return fmt.Sprintf("json: unsupported type: %s", e.Type) +} + +type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return fmt.Sprintf("json: unsupported value: %s", e.Str) +} + +func ErrSyntax(msg string, offset int64) *SyntaxError { + return &SyntaxError{msg: msg, Offset: offset} +} + +func ErrMarshaler(typ reflect.Type, err error, msg string) *MarshalerError { + return &MarshalerError{ + Type: typ, + Err: err, + sourceFunc: msg, + } +} + +func ErrExceededMaxDepth(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf(`invalid character "%c" exceeded max depth`, c), + Offset: cursor, + } +} + +func ErrNotAtBeginningOfValue(cursor int64) *SyntaxError { + return &SyntaxError{msg: "not at beginning of value", Offset: cursor} +} + +func ErrUnexpectedEndOfJSON(msg string, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("json: %s unexpected end of JSON input", msg), + Offset: cursor, + } +} + +func ErrExpected(msg string, cursor int64) *SyntaxError { + return &SyntaxError{msg: fmt.Sprintf("expected %s", msg), Offset: cursor} +} + +func ErrInvalidCharacter(c byte, context string, cursor int64) *SyntaxError { + if c == 0 { + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character as %s", context), + Offset: cursor, + } + } + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character %c as %s", c, context), + Offset: cursor, + } +} + +func ErrInvalidBeginningOfValue(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("invalid character '%c' looking for beginning of value", c), + Offset: cursor, + } +} + +type PathError struct { + msg string +} + +func (e *PathError) Error() string { + return fmt.Sprintf("json: invalid path format: %s", e.msg) +} + +func ErrInvalidPath(msg string, args ...interface{}) *PathError { + if len(args) != 0 { + return &PathError{msg: fmt.Sprintf(msg, args...)} + } + return &PathError{msg: msg} +} + +func ErrEmptyPath() *PathError { + return &PathError{msg: "path is empty"} +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go new file mode 100644 index 0000000..37cfe35 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +// Type representing reflect.rtype for noescape trick +type Type struct{} + +//go:linkname rtype_Align reflect.(*rtype).Align +//go:noescape +func rtype_Align(*Type) int + +func (t *Type) Align() int { + return rtype_Align(t) +} + 
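+// Note on the pattern used by the wrappers in this file (the "noescape trick"
+// mentioned in the comment on Type above): each body-less declaration is bound
+// via a go:linkname directive to the unexported reflect.(*rtype) method of the
+// same name, and go:noescape asserts that its arguments do not escape to the
+// heap. The exported methods on the empty Type struct simply forward to those
+// bindings.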
+//go:linkname rtype_FieldAlign reflect.(*rtype).FieldAlign +//go:noescape +func rtype_FieldAlign(*Type) int + +func (t *Type) FieldAlign() int { + return rtype_FieldAlign(t) +} + +//go:linkname rtype_Method reflect.(*rtype).Method +//go:noescape +func rtype_Method(*Type, int) reflect.Method + +func (t *Type) Method(a0 int) reflect.Method { + return rtype_Method(t, a0) +} + +//go:linkname rtype_MethodByName reflect.(*rtype).MethodByName +//go:noescape +func rtype_MethodByName(*Type, string) (reflect.Method, bool) + +func (t *Type) MethodByName(a0 string) (reflect.Method, bool) { + return rtype_MethodByName(t, a0) +} + +//go:linkname rtype_NumMethod reflect.(*rtype).NumMethod +//go:noescape +func rtype_NumMethod(*Type) int + +func (t *Type) NumMethod() int { + return rtype_NumMethod(t) +} + +//go:linkname rtype_Name reflect.(*rtype).Name +//go:noescape +func rtype_Name(*Type) string + +func (t *Type) Name() string { + return rtype_Name(t) +} + +//go:linkname rtype_PkgPath reflect.(*rtype).PkgPath +//go:noescape +func rtype_PkgPath(*Type) string + +func (t *Type) PkgPath() string { + return rtype_PkgPath(t) +} + +//go:linkname rtype_Size reflect.(*rtype).Size +//go:noescape +func rtype_Size(*Type) uintptr + +func (t *Type) Size() uintptr { + return rtype_Size(t) +} + +//go:linkname rtype_String reflect.(*rtype).String +//go:noescape +func rtype_String(*Type) string + +func (t *Type) String() string { + return rtype_String(t) +} + +//go:linkname rtype_Kind reflect.(*rtype).Kind +//go:noescape +func rtype_Kind(*Type) reflect.Kind + +func (t *Type) Kind() reflect.Kind { + return rtype_Kind(t) +} + +//go:linkname rtype_Implements reflect.(*rtype).Implements +//go:noescape +func rtype_Implements(*Type, reflect.Type) bool + +func (t *Type) Implements(u reflect.Type) bool { + return rtype_Implements(t, u) +} + +//go:linkname rtype_AssignableTo reflect.(*rtype).AssignableTo +//go:noescape +func rtype_AssignableTo(*Type, reflect.Type) bool + +func (t *Type) AssignableTo(u reflect.Type) bool { + return rtype_AssignableTo(t, u) +} + +//go:linkname rtype_ConvertibleTo reflect.(*rtype).ConvertibleTo +//go:noescape +func rtype_ConvertibleTo(*Type, reflect.Type) bool + +func (t *Type) ConvertibleTo(u reflect.Type) bool { + return rtype_ConvertibleTo(t, u) +} + +//go:linkname rtype_Comparable reflect.(*rtype).Comparable +//go:noescape +func rtype_Comparable(*Type) bool + +func (t *Type) Comparable() bool { + return rtype_Comparable(t) +} + +//go:linkname rtype_Bits reflect.(*rtype).Bits +//go:noescape +func rtype_Bits(*Type) int + +func (t *Type) Bits() int { + return rtype_Bits(t) +} + +//go:linkname rtype_ChanDir reflect.(*rtype).ChanDir +//go:noescape +func rtype_ChanDir(*Type) reflect.ChanDir + +func (t *Type) ChanDir() reflect.ChanDir { + return rtype_ChanDir(t) +} + +//go:linkname rtype_IsVariadic reflect.(*rtype).IsVariadic +//go:noescape +func rtype_IsVariadic(*Type) bool + +func (t *Type) IsVariadic() bool { + return rtype_IsVariadic(t) +} + +//go:linkname rtype_Elem reflect.(*rtype).Elem +//go:noescape +func rtype_Elem(*Type) reflect.Type + +func (t *Type) Elem() *Type { + return Type2RType(rtype_Elem(t)) +} + +//go:linkname rtype_Field reflect.(*rtype).Field +//go:noescape +func rtype_Field(*Type, int) reflect.StructField + +func (t *Type) Field(i int) reflect.StructField { + return rtype_Field(t, i) +} + +//go:linkname rtype_FieldByIndex reflect.(*rtype).FieldByIndex +//go:noescape +func rtype_FieldByIndex(*Type, []int) reflect.StructField + +func (t *Type) FieldByIndex(index []int) 
reflect.StructField { + return rtype_FieldByIndex(t, index) +} + +//go:linkname rtype_FieldByName reflect.(*rtype).FieldByName +//go:noescape +func rtype_FieldByName(*Type, string) (reflect.StructField, bool) + +func (t *Type) FieldByName(name string) (reflect.StructField, bool) { + return rtype_FieldByName(t, name) +} + +//go:linkname rtype_FieldByNameFunc reflect.(*rtype).FieldByNameFunc +//go:noescape +func rtype_FieldByNameFunc(*Type, func(string) bool) (reflect.StructField, bool) + +func (t *Type) FieldByNameFunc(match func(string) bool) (reflect.StructField, bool) { + return rtype_FieldByNameFunc(t, match) +} + +//go:linkname rtype_In reflect.(*rtype).In +//go:noescape +func rtype_In(*Type, int) reflect.Type + +func (t *Type) In(i int) reflect.Type { + return rtype_In(t, i) +} + +//go:linkname rtype_Key reflect.(*rtype).Key +//go:noescape +func rtype_Key(*Type) reflect.Type + +func (t *Type) Key() *Type { + return Type2RType(rtype_Key(t)) +} + +//go:linkname rtype_Len reflect.(*rtype).Len +//go:noescape +func rtype_Len(*Type) int + +func (t *Type) Len() int { + return rtype_Len(t) +} + +//go:linkname rtype_NumField reflect.(*rtype).NumField +//go:noescape +func rtype_NumField(*Type) int + +func (t *Type) NumField() int { + return rtype_NumField(t) +} + +//go:linkname rtype_NumIn reflect.(*rtype).NumIn +//go:noescape +func rtype_NumIn(*Type) int + +func (t *Type) NumIn() int { + return rtype_NumIn(t) +} + +//go:linkname rtype_NumOut reflect.(*rtype).NumOut +//go:noescape +func rtype_NumOut(*Type) int + +func (t *Type) NumOut() int { + return rtype_NumOut(t) +} + +//go:linkname rtype_Out reflect.(*rtype).Out +//go:noescape +func rtype_Out(*Type, int) reflect.Type + +//go:linkname PtrTo reflect.(*rtype).ptrTo +//go:noescape +func PtrTo(*Type) *Type + +func (t *Type) Out(i int) reflect.Type { + return rtype_Out(t, i) +} + +//go:linkname IfaceIndir reflect.ifaceIndir +//go:noescape +func IfaceIndir(*Type) bool + +//go:linkname RType2Type reflect.toType +//go:noescape +func RType2Type(t *Type) reflect.Type + +type emptyInterface struct { + _ *Type + ptr unsafe.Pointer +} + +func Type2RType(t reflect.Type) *Type { + return (*Type)(((*emptyInterface)(unsafe.Pointer(&t))).ptr) +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go new file mode 100644 index 0000000..baab0c5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "reflect" + "strings" + "unicode" +) + +func getTag(field reflect.StructField) string { + return field.Tag.Get("json") +} + +func IsIgnoredStructField(field reflect.StructField) bool { + if field.PkgPath != "" { + if field.Anonymous { + t := field.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return true + } + } else { + // private field + return true + } + } + tag := getTag(field) + return tag == "-" +} + +type StructTag struct { + Key string + IsTaggedKey bool + IsOmitEmpty bool + IsString bool + Field reflect.StructField +} + +type StructTags []*StructTag + +func (t StructTags) ExistsKey(key string) bool { + for _, tt := range t { + if tt.Key == key { + return true + } + } + return false +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + 
// in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +func StructTagFromField(field reflect.StructField) *StructTag { + keyName := field.Name + tag := getTag(field) + st := &StructTag{Field: field} + opts := strings.Split(tag, ",") + if len(opts) > 0 { + if opts[0] != "" && isValidTag(opts[0]) { + keyName = opts[0] + st.IsTaggedKey = true + } + } + st.Key = keyName + if len(opts) > 1 { + for _, opt := range opts[1:] { + switch opt { + case "omitempty": + st.IsOmitEmpty = true + case "string": + st.IsString = true + } + } + } + return st +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go new file mode 100644 index 0000000..0167cd2 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go @@ -0,0 +1,100 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +type SliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +const ( + maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib +) + +type TypeAddr struct { + BaseTypeAddr uintptr + MaxTypeAddr uintptr + AddrRange uintptr + AddrShift uintptr +} + +var ( + typeAddr *TypeAddr + alreadyAnalyzed bool +) + +//go:linkname typelinks reflect.typelinks +func typelinks() ([]unsafe.Pointer, [][]int32) + +//go:linkname rtypeOff reflect.rtypeOff +func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer + +func AnalyzeTypeAddr() *TypeAddr { + defer func() { + alreadyAnalyzed = true + }() + if alreadyAnalyzed { + return typeAddr + } + sections, offsets := typelinks() + if len(sections) != 1 { + return nil + } + if len(offsets) != 1 { + return nil + } + section := sections[0] + offset := offsets[0] + var ( + min uintptr = uintptr(^uint(0)) + max uintptr = 0 + isAligned64 = true + isAligned32 = true + ) + for i := 0; i < len(offset); i++ { + typ := (*Type)(rtypeOff(section, offset[i])) + addr := uintptr(unsafe.Pointer(typ)) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + if typ.Kind() == reflect.Ptr { + addr = uintptr(unsafe.Pointer(typ.Elem())) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + } + isAligned64 = isAligned64 && (addr-min)&63 == 0 + isAligned32 = isAligned32 && (addr-min)&31 == 0 + } + addrRange := max - min + if addrRange == 0 { + return nil + } + var addrShift uintptr + if isAligned64 { + addrShift = 6 + } else if isAligned32 { + addrShift = 5 + } + cacheSize := addrRange >> addrShift + if cacheSize > maxAcceptableTypeAddrRange { + return nil + } + typeAddr = &TypeAddr{ + BaseTypeAddr: min, + MaxTypeAddr: max, + AddrRange: addrRange, + AddrShift: addrShift, + } + return typeAddr +} diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go new file mode 100644 index 0000000..fb18065 --- /dev/null +++ b/vendor/github.com/goccy/go-json/json.go @@ -0,0 +1,368 @@ +package json + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/goccy/go-json/internal/encoder" +) + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid JSON. +type Marshaler interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerContext is the interface implemented by types that +// can marshal themselves into valid JSON with context.Context. +type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. 
+// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerContext is the interface implemented by types +// that can unmarshal with context.Context a JSON description of themselves. +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder that had SetEscapeHTML(false) +// called on it. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. +// Each exported struct field becomes a member of the object, using the +// field name as the object key, unless the field is omitted for one of the +// reasons given below. +// +// The encoding of each struct field can be customized by the format string +// stored under the "json" key in the struct field's tag. +// The format string gives the name of the field, possibly followed by a +// comma-separated list of options. The name may be empty in order to +// specify options without overriding the default field name. +// +// The "omitempty" option specifies that the field should be omitted +// from the encoding if the field has an empty value, defined as +// false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. +// +// As a special case, if the field tag is "-", the field is always omitted. +// Note that a field with name "-" can still be generated using the tag "-,". +// +// Examples of struct field tags and their meanings: +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "-". 
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method and encodes the result as a JSON string.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder that had SetEscapeHTML(false)
+// called on it.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects.
+// Each exported struct field becomes a member of the object, using the
+// field name as the object key, unless the field is omitted for one of the
+// reasons given below.
+//
+// The encoding of each struct field can be customized by the format string
+// stored under the "json" key in the struct field's tag.
+// The format string gives the name of the field, possibly followed by a
+// comma-separated list of options. The name may be empty in order to
+// specify options without overriding the default field name.
+//
+// The "omitempty" option specifies that the field should be omitted
+// from the encoding if the field has an empty value, defined as
+// false, 0, a nil pointer, a nil interface value, and any empty array,
+// slice, map, or string.
+//
+// As a special case, if the field tag is "-", the field is always omitted.
+// Note that a field with name "-" can still be generated using the tag "-,".
+//
+// Examples of struct field tags and their meanings:
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "-".
+//   Field int `json:"-,"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+//   Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, and ASCII punctuation except quotation
+// marks, backslash, and comma.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+//
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+//
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a
+// string, an integer type, or implement encoding.TextMarshaler. The map keys
+// are sorted and used as JSON object keys by applying the following rules,
+// subject to the UTF-8 coercion described for string values above:
+//   - string keys are used directly
+//   - encoding.TextMarshalers are marshaled
+//   - integer keys are converted to strings
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+func Marshal(v interface{}) ([]byte, error) {
+    return MarshalWithOption(v)
+}
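The tag options documented above combine per field. A rough sketch of the resulting output (the User type and values are illustrative, not part of this change):

    type User struct {
        Name  string `json:"name"`
        Email string `json:"email,omitempty"` // dropped when empty
        Score int64  `json:",string"`         // encoded as a JSON string, key stays "Score"
        Token string `json:"-"`               // never emitted
    }

    // Per the rules above, Marshal(User{Name: "ada", Score: 7}) yields:
    // {"name":"ada","Score":"7"}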
+
+// MarshalNoEscape returns the JSON encoding of v and doesn't escape v.
+func MarshalNoEscape(v interface{}) ([]byte, error) {
+    return marshalNoEscape(v)
+}
+
+// MarshalContext returns the JSON encoding of v with context.Context and EncodeOption.
+func MarshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+    return marshalContext(ctx, v, optFuncs...)
+}
+
+// MarshalWithOption returns the JSON encoding of v with EncodeOption.
+func MarshalWithOption(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+    return marshal(v, optFuncs...)
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+// Each JSON element in the output will begin on a new line beginning with prefix
+// followed by one or more copies of indent according to the indentation nesting.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+    return MarshalIndentWithOption(v, prefix, indent)
+}
+
+// MarshalIndentWithOption is like Marshal but applies Indent to format the output with EncodeOption.
+func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) {
+    return marshalIndent(v, prefix, indent, optFuncs...)
+}
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v. If v is nil or not a pointer,
+// Unmarshal returns an InvalidUnmarshalError.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalJSON method, including
+// when the input is a JSON null.
+// Otherwise, if the value implements encoding.TextUnmarshaler
+// and the input is a JSON quoted string, Unmarshal calls that value's
+// UnmarshalText method with the unquoted form of the string.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match. By
+// default, object keys which don't have a corresponding struct field are
+// ignored (see Decoder.DisallowUnknownFields for an alternative).
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//   bool, for JSON booleans
+//   float64, for JSON numbers
+//   string, for JSON strings
+//   []interface{}, for JSON arrays
+//   map[string]interface{}, for JSON objects
+//   nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores
+// key-value pairs from the JSON object into the map. The map's key type must
+// either be any string type, an integer, implement json.Unmarshaler, or
+// implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error. In any
+// case, it's not guaranteed that all the remaining fields following
+// the problematic one will be unmarshaled into the target object.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+func Unmarshal(data []byte, v interface{}) error {
+    return unmarshal(data, v)
+}
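A brief sketch of the two most common decode targets described above, a struct and an untyped interface{} value (the User type is the illustrative one sketched earlier, not part of this change):

    var u User
    if err := Unmarshal([]byte(`{"name":"ada","Score":"7"}`), &u); err != nil {
        // handle the decode error
    }

    var anyValue interface{}
    _ = Unmarshal([]byte(`[1, "two", null]`), &anyValue)
    // anyValue now holds []interface{}{float64(1), "two", nil}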
+
+// UnmarshalContext parses the JSON-encoded data and stores the result
+// in the value pointed to by v. If you implement the UnmarshalerContext interface,
+// call it with ctx as an argument.
+func UnmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+    return unmarshalContext(ctx, data, v)
+}
+
+func UnmarshalWithOption(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+    return unmarshal(data, v, optFuncs...)
+}
+
+func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+    return unmarshalNoEscape(data, v, optFuncs...)
+}
+
+// A Token holds a value of one of these types:
+//
+//   Delim, for the four JSON delimiters [ ] { }
+//   bool, for JSON booleans
+//   float64, for JSON numbers
+//   Number, for JSON numbers
+//   string, for JSON string literals
+//   nil, for JSON null
+type Token = json.Token
+
+// A Number represents a JSON number literal.
+type Number = json.Number
+
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage = json.RawMessage
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim = json.Delim
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+    return encoder.Compact(dst, src, false)
+}
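Compact writes into a caller-supplied bytes.Buffer rather than returning a new slice; a minimal usage sketch:

    var buf bytes.Buffer
    if err := Compact(&buf, []byte("{\n  \"a\": 1\n}")); err != nil {
        // handle the syntax error
    }
    // buf.String() == `{"a":1}`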
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+    return encoder.Indent(dst, src, prefix, indent)
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML