From 5b141b229b72d4c6cf24d252f4e07f5b56e5ec29 Mon Sep 17 00:00:00 2001
From: Kunju Perath
Date: Tue, 9 Jan 2024 16:19:22 -0500
Subject: [PATCH] move github.com/google/cel-go back to apiserver-supported version v0.17.7
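
cel-go v0.18.x depends on the new github.com/antlr4-go/antlr/v4 module,
while cel-go v0.17.7 (the version the apiserver supports) still depends on
github.com/antlr/antlr4/runtime/Go/antlr/v4, so pinning cel-go back also
swaps the vendored ANTLR Go runtime module.

A downgrade like this is normally reproduced with the standard Go module
workflow; the commands below are an illustrative sketch, not taken from this
patch:

    go get github.com/google/cel-go@v0.17.7
    go mod tidy
    go mod vendor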
---
go.mod | 4 +-
go.sum | 8 +-
.../antlr/antlr4/runtime/Go/antlr/v4/LICENSE | 26 +
.../antlr4/runtime/Go/antlr/v4/antlrdoc.go | 68 +
.../antlr4/runtime/Go}/antlr/v4/atn.go | 9 +-
.../antlr4/runtime/Go/antlr/v4/atn_config.go | 303 ++
.../runtime/Go/antlr/v4/atn_config_set.go | 441 +++
.../antlr/v4/atn_deserialization_options.go | 7 +-
.../runtime/Go}/antlr/v4/atn_deserializer.go | 9 +-
.../runtime/Go}/antlr/v4/atn_simulator.go | 17 +-
.../antlr4/runtime/Go}/antlr/v4/atn_state.go | 224 +-
.../antlr4/runtime/Go}/antlr/v4/atn_type.go | 0
.../runtime/Go}/antlr/v4/char_stream.go | 2 +-
.../Go}/antlr/v4/common_token_factory.go | 0
.../Go}/antlr/v4/common_token_stream.go | 39 +-
.../runtime/Go}/antlr/v4/comparators.go | 33 +-
.../antlr4/runtime/Go}/antlr/v4/dfa.go | 47 +-
.../runtime/Go}/antlr/v4/dfa_serializer.go | 2 +-
.../antlr4/runtime/Go}/antlr/v4/dfa_state.go | 29 +-
.../Go}/antlr/v4/diagnostic_error_listener.go | 11 +-
.../runtime/Go}/antlr/v4/error_listener.go | 40 +-
.../runtime/Go}/antlr/v4/error_strategy.go | 450 +--
.../antlr4/runtime/Go}/antlr/v4/errors.go | 73 +-
.../runtime/Go}/antlr/v4/file_stream.go | 46 +-
.../runtime/Go/antlr/v4/input_stream.go | 113 +
.../antlr4/runtime/Go}/antlr/v4/int_stream.go | 0
.../runtime/Go}/antlr/v4/interval_set.go | 60 +-
.../antlr4/runtime/Go/antlr/v4/jcollect.go | 198 ++
.../antlr4/runtime/Go}/antlr/v4/lexer.go | 68 +-
.../runtime/Go}/antlr/v4/lexer_action.go | 100 +-
.../Go}/antlr/v4/lexer_action_executor.go | 61 +-
.../Go}/antlr/v4/lexer_atn_simulator.go | 185 +-
.../runtime/Go}/antlr/v4/ll1_analyzer.go | 62 +-
.../antlr4/runtime/Go}/antlr/v4/parser.go | 160 +-
.../Go}/antlr/v4/parser_atn_simulator.go | 787 +++---
.../Go}/antlr/v4/parser_rule_context.go | 85 +-
.../runtime/Go/antlr/v4/prediction_context.go | 806 ++++++
.../runtime/Go/antlr/v4/prediction_mode.go | 529 ++++
.../antlr4/runtime/Go}/antlr/v4/recognizer.go | 65 +-
.../runtime/Go/antlr/v4/rule_context.go | 114 +
.../runtime/Go}/antlr/v4/semantic_context.go | 33 +-
.../antlr4/runtime/Go}/antlr/v4/token.go | 36 +-
.../runtime/Go}/antlr/v4/token_source.go | 0
.../runtime/Go}/antlr/v4/token_stream.go | 3 +-
.../Go}/antlr/v4/tokenstream_rewriter.go | 221 +-
.../runtime/Go}/antlr/v4/trace_listener.go | 0
.../antlr4/runtime/Go}/antlr/v4/transition.go | 229 +-
.../antlr4/runtime/Go}/antlr/v4/tree.go | 109 +-
.../antlr4/runtime/Go}/antlr/v4/trees.go | 22 +-
.../antlr4/runtime/Go}/antlr/v4/utils.go | 84 +-
.../antlr4/runtime/Go/antlr/v4/utils_set.go | 235 ++
.../github.com/antlr4-go/antlr/v4/.gitignore | 18 -
vendor/github.com/antlr4-go/antlr/v4/LICENSE | 28 -
.../github.com/antlr4-go/antlr/v4/README.md | 54 -
.../github.com/antlr4-go/antlr/v4/antlrdoc.go | 102 -
.../antlr4-go/antlr/v4/atn_config.go | 335 ---
.../antlr4-go/antlr/v4/atn_config_set.go | 301 --
.../antlr4-go/antlr/v4/configuration.go | 214 --
.../antlr4-go/antlr/v4/input_stream.go | 157 --
.../github.com/antlr4-go/antlr/v4/jcollect.go | 685 -----
.../antlr4-go/antlr/v4/nostatistics.go | 47 -
.../antlr4-go/antlr/v4/prediction_context.go | 727 -----
.../antlr/v4/prediction_context_cache.go | 48 -
.../antlr4-go/antlr/v4/prediction_mode.go | 536 ----
.../antlr4-go/antlr/v4/rule_context.go | 40 -
.../antlr4-go/antlr/v4/statistics.go | 281 --
.../antlr4-go/antlr/v4/stats_data.go | 23 -
.../github.com/google/cel-go/cel/BUILD.bazel | 7 -
vendor/github.com/google/cel-go/cel/decls.go | 40 +
vendor/github.com/google/cel-go/cel/env.go | 128 +-
.../github.com/google/cel-go/cel/folding.go | 559 ----
.../github.com/google/cel-go/cel/inlining.go | 240 --
vendor/github.com/google/cel-go/cel/io.go | 36 +-
.../github.com/google/cel-go/cel/library.go | 61 +-
vendor/github.com/google/cel-go/cel/macro.go | 456 +--
.../github.com/google/cel-go/cel/optimizer.go | 482 ----
.../github.com/google/cel-go/cel/options.go | 2 -
.../github.com/google/cel-go/cel/program.go | 60 +-
.../github.com/google/cel-go/cel/validator.go | 51 +-
.../google/cel-go/checker/BUILD.bazel | 1 +
.../google/cel-go/checker/checker.go | 356 ++-
.../github.com/google/cel-go/checker/cost.go | 192 +-
.../google/cel-go/checker/errors.go | 18 +-
.../google/cel-go/checker/printer.go | 34 +-
.../google/cel-go/common/ast/BUILD.bazel | 9 -
.../google/cel-go/common/ast/ast.go | 476 +---
.../google/cel-go/common/ast/conversion.go | 632 -----
.../google/cel-go/common/ast/expr.go | 967 +++----
.../google/cel-go/common/ast/factory.go | 303 --
.../google/cel-go/common/ast/navigable.go | 652 -----
.../cel-go/common/containers/BUILD.bazel | 4 +-
.../cel-go/common/containers/container.go | 22 +-
.../google/cel-go/common/debug/BUILD.bazel | 4 +-
.../google/cel-go/common/debug/debug.go | 156 +-
.../github.com/google/cel-go/common/errors.go | 2 +-
.../google/cel-go/common/types/provider.go | 25 +-
.../github.com/google/cel-go/ext/BUILD.bazel | 6 +-
vendor/github.com/google/cel-go/ext/README.md | 14 -
.../github.com/google/cel-go/ext/bindings.go | 24 +-
.../google/cel-go/ext/formatting.go | 904 ------
vendor/github.com/google/cel-go/ext/guards.go | 11 +-
vendor/github.com/google/cel-go/ext/math.go | 67 +-
vendor/github.com/google/cel-go/ext/native.go | 18 -
vendor/github.com/google/cel-go/ext/protos.go | 45 +-
.../github.com/google/cel-go/ext/strings.go | 454 ++-
.../google/cel-go/interpreter/BUILD.bazel | 1 +
.../google/cel-go/interpreter/formatting.go | 383 +++
.../google/cel-go/interpreter/interpreter.go | 26 +-
.../google/cel-go/interpreter/planner.go | 279 +-
.../google/cel-go/interpreter/prune.go | 496 ++--
.../google/cel-go/parser/BUILD.bazel | 9 +-
.../google/cel-go/parser/gen/BUILD.bazel | 2 +-
.../cel-go/parser/gen/cel_base_listener.go | 4 +-
.../cel-go/parser/gen/cel_base_visitor.go | 5 +-
.../google/cel-go/parser/gen/cel_lexer.go | 603 ++--
.../google/cel-go/parser/gen/cel_listener.go | 5 +-
.../google/cel-go/parser/gen/cel_parser.go | 2478 ++++++-----------
.../google/cel-go/parser/gen/cel_visitor.go | 8 +-
.../google/cel-go/parser/gen/generate.sh | 2 +-
.../github.com/google/cel-go/parser/helper.go | 580 ++--
.../github.com/google/cel-go/parser/input.go | 4 +-
.../github.com/google/cel-go/parser/macro.go | 194 +-
.../github.com/google/cel-go/parser/parser.go | 189 +-
.../google/cel-go/parser/unparser.go | 222 +-
vendor/modules.txt | 8 +-
125 files changed, 8798 insertions(+), 14067 deletions(-)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn.go (94%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_deserialization_options.go (86%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_deserializer.go (97%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_simulator.go (66%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_state.go (65%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_type.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/char_stream.go (89%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/common_token_factory.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/common_token_stream.go (88%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/comparators.go (82%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/dfa.go (76%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/dfa_serializer.go (97%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/dfa_state.go (81%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/diagnostic_error_listener.go (92%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/error_listener.go (62%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/error_strategy.go (58%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/errors.go (73%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/file_stream.go (52%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/int_stream.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/interval_set.go (82%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer.go (78%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer_action.go (78%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer_action_executor.go (70%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer_atn_simulator.go (80%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/ll1_analyzer.go (73%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/parser.go (80%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/parser_atn_simulator.go (64%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/parser_rule_context.go (77%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/recognizer.go (70%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/semantic_context.go (92%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/token.go (86%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/token_source.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/token_stream.go (90%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/tokenstream_rewriter.go (73%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/trace_listener.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/transition.go (67%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/tree.go (62%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/trees.go (81%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/utils.go (85%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/.gitignore
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/LICENSE
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/README.md
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/configuration.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/input_stream.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/jcollect.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/rule_context.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/statistics.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/stats_data.go
delete mode 100644 vendor/github.com/google/cel-go/cel/folding.go
delete mode 100644 vendor/github.com/google/cel-go/cel/inlining.go
delete mode 100644 vendor/github.com/google/cel-go/cel/optimizer.go
delete mode 100644 vendor/github.com/google/cel-go/common/ast/conversion.go
delete mode 100644 vendor/github.com/google/cel-go/common/ast/factory.go
delete mode 100644 vendor/github.com/google/cel-go/common/ast/navigable.go
delete mode 100644 vendor/github.com/google/cel-go/ext/formatting.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/formatting.go
diff --git a/go.mod b/go.mod
index 7d7240ba6..7496bd28e 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -48,7 +48,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/cel-go v0.18.2 // indirect
+ github.com/google/cel-go v0.17.7 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
diff --git a/go.sum b/go.sum
index 7053f97ff..9dff76437 100644
--- a/go.sum
+++ b/go.sum
@@ -7,8 +7,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
-github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
-github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -78,8 +78,8 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.18.2 h1:L0B6sNBSVmt0OyECi8v6VOS74KOc9W/tLiWKfZABvf4=
-github.com/google/cel-go v0.18.2/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
+github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
+github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
new file mode 100644
index 000000000..52cf18e42
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
new file mode 100644
index 000000000..ab5121267
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
@@ -0,0 +1,68 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system.
+
+Here is a general template for an ANTLR based recognizer in Go:
+
+ .
+ ├── myproject
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.0-complete.jar
+ │ ├── error_listeners.go
+ │ ├── generate.go
+ │ ├── generate.sh
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options.
+
+From the command line at the root of your package “myproject” you can then simply issue the command:
+
+ go generate ./...
+
+# Copyright Notice
+
+Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+*/
+package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
similarity index 94%
rename from vendor/github.com/antlr4-go/antlr/v4/atn.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
index cdeefed24..98010d2e6 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
@@ -20,11 +20,10 @@ var ATNInvalidAltNumber int
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
-
- // DecisionToState is the decision points for all rules, sub-rules, optional
- // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
// can go back later and build DFA predictors for them. This includes
- // all the rules, sub-rules, optional blocks, ()+, ()* etc...
+ // all the rules, subrules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -52,8 +51,6 @@ type ATN struct {
// specified, and otherwise is nil.
ruleToTokenType []int
- // ATNStates is a list of all states in the ATN, ordered by state number.
- //
states []ATNState
mu sync.Mutex
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
new file mode 100644
index 000000000..7619fa172
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
@@ -0,0 +1,303 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ Equals(o Collectable[ATNConfig]) bool
+ Hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
+ if b == o {
+ return true
+ } else if o == nil {
+ return false
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.Equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.Equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for BaseATNConfig, when no specialist hash function
+// is required for a collection
+func (b *BaseATNConfig) Hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.GetStateNumber())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.Hash())
+ h = murmurUpdate(h, l.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
+ if l == other {
+ return true
+ }
+ var othert, ok = other.(*LexerATNConfig)
+
+ if l == other {
+ return true
+ } else if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.Equals(othert.BaseATNConfig)
+}
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
new file mode 100644
index 000000000..43e9b33f3
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
@@ -0,0 +1,441 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type ATNConfigSet interface {
+ Hash() int
+ Equals(o Collectable[ATNConfig]) bool
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() *JStore[ATNState, Comparator[ATNState]]
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ Alts() *BitSet
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from a.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. It not, protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func (b *BaseATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare is a hack function just to verify that adding DFAstares to the known
+// set works, so long as comparison of ATNConfigSet s works. For that to work, we
+// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
+// know the order, so we do this inefficient hack. If this proves the point, then
+// we can change the config set to a better structure.
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+
+ for _, c := range b.configs {
+ found := false
+ for _, c2 := range bs.configs {
+ if c.Equals(c2) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return false
+ }
+
+ }
+ return true
+}
+
+func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *BaseATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func hashATNConfig(i interface{}) int {
+ o := i.(ATNConfig)
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
+ return false
+ }
+
+ if ai.GetAlt() != bi.GetAlt() {
+ return false
+ }
+
+ return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
similarity index 86%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
index bdb30b362..3c975ec7b 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
@@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.readOnly = readOnly
}
@@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.verifyATN = verifyATN
}
@@ -42,12 +42,11 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}
-//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
similarity index 97%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
index 2dcb9ae11..3888856b4 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
@@ -35,7 +35,6 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
return &ATNDeserializer{options: options}
}
-//goland:noinspection GoUnusedFunction
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
@@ -194,7 +193,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
}
}
-func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
m := a.readInt()
// Preallocate the needed capacity.
@@ -351,7 +350,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart.endState = bypassStop
- atn.defineDecisionState(&bypassStart.BaseDecisionState)
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
@@ -451,7 +450,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
continue
}
- // We analyze the [ATN] to determine if an ATN decision state is the
+ // We analyze the ATN to determine if a ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
@@ -554,7 +553,7 @@ func (a *ATNDeserializer) readInt() int {
return int(v) // data is 32 bits but int is at least that big
}
-func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
similarity index 66%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
index afe6c9f80..41529115f 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
@@ -4,7 +4,7 @@
package antlr
-var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
type IATNSimulator interface {
SharedContextCache() *PredictionContextCache
@@ -18,13 +18,22 @@ type BaseATNSimulator struct {
decisionToDFA []*DFA
}
-func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
if b.sharedContextCache == nil {
return context
}
- //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
- visited := NewVisitRecord()
+ visited := make(map[PredictionContext]PredictionContext)
+
return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
similarity index 65%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_state.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
index 2ae5807cd..1f2a56bc3 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
@@ -4,11 +4,7 @@
package antlr
-import (
- "fmt"
- "os"
- "strconv"
-)
+import "strconv"
// Constants for serialization.
const (
@@ -29,7 +25,6 @@ const (
ATNStateInvalidStateNumber = -1
)
-//goland:noinspection GoUnusedGlobalVariable
var ATNStateInitialNumTransitions = 4
type ATNState interface {
@@ -78,7 +73,7 @@ type BaseATNState struct {
transitions []Transition
}
-func NewATNState() *BaseATNState {
+func NewBaseATNState() *BaseATNState {
return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}
@@ -153,46 +148,27 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) {
if len(as.transitions) == 0 {
as.epsilonOnlyTransitions = trans.getIsEpsilon()
} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
- _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
as.epsilonOnlyTransitions = false
}
- // TODO: Check code for already present compared to the Java equivalent
- //alreadyPresent := false
- //for _, t := range as.transitions {
- // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
- // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
- // alreadyPresent = true
- // break
- // }
- // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
- // alreadyPresent = true
- // break
- // }
- //}
- //if !alreadyPresent {
if index == -1 {
as.transitions = append(as.transitions, trans)
} else {
as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
// TODO: as.transitions.splice(index, 1, trans)
}
- //} else {
- // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
- //}
}
type BasicState struct {
- BaseATNState
+ *BaseATNState
}
func NewBasicState() *BasicState {
- return &BasicState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
}
type DecisionState interface {
@@ -206,19 +182,13 @@ type DecisionState interface {
}
type BaseDecisionState struct {
- BaseATNState
+ *BaseATNState
decision int
nonGreedy bool
}
func NewBaseDecisionState() *BaseDecisionState {
- return &BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- decision: -1,
- }
+ return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}
func (s *BaseDecisionState) getDecision() int {
@@ -246,20 +216,12 @@ type BlockStartState interface {
// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
- BaseDecisionState
+ *BaseDecisionState
endState *BlockEndState
}
func NewBlockStartState() *BaseBlockStartState {
- return &BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- decision: -1,
- },
- }
+ return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}
func (s *BaseBlockStartState) getEndState() *BlockEndState {
@@ -271,38 +233,31 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
}
type BasicBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
}
func NewBasicBlockStartState() *BasicBlockStartState {
- return &BasicBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &BasicBlockStartState{}
// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
- BaseATNState
+ *BaseATNState
startState ATNState
}
func NewBlockEndState() *BlockEndState {
- return &BlockEndState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBlockEnd,
- },
- startState: nil,
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
}
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
@@ -310,48 +265,43 @@ func NewBlockEndState() *BlockEndState {
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
- BaseATNState
+ *BaseATNState
}
func NewRuleStopState() *RuleStopState {
- return &RuleStopState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateRuleStop,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
}
type RuleStartState struct {
- BaseATNState
+ *BaseATNState
stopState ATNState
isPrecedenceRule bool
}
func NewRuleStartState() *RuleStartState {
- return &RuleStartState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateRuleStart,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
}
// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
- BaseDecisionState
+ *BaseDecisionState
}
func NewPlusLoopbackState() *PlusLoopbackState {
- return &PlusLoopbackState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStatePlusLoopBack,
- },
- },
- }
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
}
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
@@ -359,103 +309,85 @@ func NewPlusLoopbackState() *PlusLoopbackState {
// it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
loopBackState ATNState
}
func NewPlusBlockStartState() *PlusBlockStartState {
- return &PlusBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStatePlusBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &PlusBlockStartState{}
// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
}
func NewStarBlockStartState() *StarBlockStartState {
- return &StarBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &StarBlockStartState{}
type StarLoopbackState struct {
- BaseATNState
+ *BaseATNState
}
func NewStarLoopbackState() *StarLoopbackState {
- return &StarLoopbackState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarLoopBack,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
}
type StarLoopEntryState struct {
- BaseDecisionState
+ *BaseDecisionState
loopBackState ATNState
precedenceRuleDecision bool
}
func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
// False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
- return &StarLoopEntryState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarLoopEntry,
- },
- },
- }
+ return &StarLoopEntryState{BaseDecisionState: b}
}
// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
- BaseATNState
+ *BaseATNState
loopBackState ATNState
}
func NewLoopEndState() *LoopEndState {
- return &LoopEndState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateLoopEnd,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
}
// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
- BaseDecisionState
+ *BaseDecisionState
}
func NewTokensStartState() *TokensStartState {
- return &TokensStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateTokenStart,
- },
- },
- }
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
}
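
The hunks above switch each concrete ATN state type between two Go embedding styles: the removed lines value-embed BaseATNState (or BaseDecisionState) and initialize it inline with a struct literal, while the restored lines pointer-embed it and route all defaulting through a shared constructor. The standalone sketch below uses invented names, not the runtime's types, to show the two patterns side by side.

    package main

    import "fmt"

    // base stands in for a shared state struct; the names here are invented
    // for illustration and are not the ANTLR runtime's types.
    type base struct {
        stateNumber int
        stateType   int
    }

    // newBase centralises the defaults, as the restored constructors do.
    func newBase() *base {
        return &base{stateNumber: -1}
    }

    // ruleStop pointer-embeds the base, mirroring the restored (+) style:
    // the constructor reuses newBase and then overrides one field.
    type ruleStop struct {
        *base
    }

    func newRuleStop() *ruleStop {
        b := newBase()
        b.stateType = 7 // e.g. "rule stop"
        return &ruleStop{base: b}
    }

    // ruleStart value-embeds the base, mirroring the removed (-) style:
    // defaults are repeated inline in a struct literal.
    type ruleStart struct {
        base
    }

    func newRuleStart() *ruleStart {
        return &ruleStart{base: base{stateNumber: -1, stateType: 2}}
    }

    func main() {
        fmt.Println(newRuleStop().stateNumber, newRuleStart().stateType)
    }

Value embedding keeps the whole state in a single allocation and one literal; pointer embedding lets every constructor funnel through the same defaulting code, which is the trade-off this revert switches back to.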
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_type.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
similarity index 89%
rename from vendor/github.com/antlr4-go/antlr/v4/char_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
index bd8127b6b..c33f0adb5 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
@@ -8,5 +8,5 @@ type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
- GetTextFromInterval(Interval) string
+ GetTextFromInterval(*Interval) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
similarity index 88%
rename from vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
index b75da9df0..c6c9485a2 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
@@ -28,24 +28,22 @@ type CommonTokenStream struct {
// trivial with bt field.
fetchedEOF bool
- // index into [tokens] of the current token (next token to consume).
+ // index indexes into tokens of the current token (next token to consume).
// tokens[p] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
- // see the documentation of [IntStream] for a description of initializing methods.
+ // see the documentation of IntStream for a description of initializing methods.
index int
- // tokenSource is the [TokenSource] from which tokens for the bt stream are
+ // tokenSource is the TokenSource from which tokens for the bt stream are
// fetched.
tokenSource TokenSource
- // tokens contains all tokens fetched from the token source. The list is considered a
+ // tokens is all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
-// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
-// tokens and will pull tokens from the given lexer channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
@@ -55,7 +53,6 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
}
}
-// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
@@ -64,11 +61,9 @@ func (c *CommonTokenStream) Mark() int {
return 0
}
-func (c *CommonTokenStream) Release(_ int) {}
+func (c *CommonTokenStream) Release(marker int) {}
-func (c *CommonTokenStream) Reset() {
- c.fetchedEOF = false
- c.tokens = make([]Token, 0)
+func (c *CommonTokenStream) reset() {
c.Seek(0)
}
@@ -112,7 +107,7 @@ func (c *CommonTokenStream) Consume() {
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
- n := i - len(c.tokens) + 1 // How many more elements do we need?
+ n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
@@ -198,13 +193,12 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
- c.fetchedEOF = false
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
-// no tokens on channel between 'i' and [TokenEOF].
-func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
+// no tokens on channel between i and EOF.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
c.Sync(i)
if i >= len(c.tokens) {
@@ -250,7 +244,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
- // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
+ // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
var to int
if nextOnChannel == -1 {
@@ -320,8 +314,7 @@ func (c *CommonTokenStream) Index() int {
}
func (c *CommonTokenStream) GetAllText() string {
- c.Fill()
- return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
+ return c.GetTextFromInterval(nil)
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
@@ -336,9 +329,15 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
return c.GetTextFromInterval(interval.GetSourceInterval())
}
-func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
- c.Sync(interval.Stop)
+
+ if interval == nil {
+ c.Fill()
+ interval = NewInterval(0, len(c.tokens)-1)
+ } else {
+ c.Sync(interval.Stop)
+ }
start := interval.Start
stop := interval.Stop
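
In the restored GetTextFromInterval above, a nil *Interval is a sentinel meaning "the whole token stream", whereas the removed version had GetAllText fill the stream and build the interval itself before delegating. A minimal standalone sketch of that nil-sentinel dispatch, with invented types in place of the runtime's Interval and token list:

    package main

    import (
        "fmt"
        "strings"
    )

    // interval is a stand-in for the runtime's Interval type.
    type interval struct{ Start, Stop int }

    // textFromInterval substitutes the full range when the caller passes nil,
    // which is how the restored GetAllText delegates to GetTextFromInterval(nil).
    func textFromInterval(tokens []string, ival *interval) string {
        if ival == nil {
            ival = &interval{Start: 0, Stop: len(tokens) - 1}
        }
        var b strings.Builder
        for i := ival.Start; i >= 0 && i <= ival.Stop && i < len(tokens); i++ {
            b.WriteString(tokens[i])
        }
        return b.String()
    }

    func main() {
        toks := []string{"a", "b", "c"}
        fmt.Println(textFromInterval(toks, nil))                          // abc
        fmt.Println(textFromInterval(toks, &interval{Start: 1, Stop: 2})) // bc
    }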
diff --git a/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
similarity index 82%
rename from vendor/github.com/antlr4-go/antlr/v4/comparators.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
index 7467e9b43..9ea320053 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/comparators.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
@@ -18,20 +18,17 @@ package antlr
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
-// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}
var (
- aStateEqInst = &ObjEqComparator[ATNState]{}
- aConfEqInst = &ObjEqComparator[*ATNConfig]{}
-
- // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
- aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
- atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[ATNConfig]{}
+ aConfCompInst = &ATNConfigComparator[ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
- atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
- pContextEqInst = &ObjEqComparator[*PredictionContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
)
// Equals2 delegates to the Equals() method of type T
@@ -47,14 +44,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
type SemCComparator[T Collectable[T]] struct{}
-// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -75,8 +72,7 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
-
+func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
@@ -89,7 +85,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -109,21 +105,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
+func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}
-// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
-func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -145,6 +141,7 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
-func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
+
return o.Hash()
}
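
The comparator changes above are mostly a switch from *ATNConfig back to the ATNConfig interface, but the underlying pattern is unchanged: a small generic comparator whose Hash1/Equals2 delegate to the element's own methods, so one collection implementation can hold many element types. A self-contained sketch of that pattern, with toy types standing in for the runtime's Collectable and ATNConfig:

    package main

    import "fmt"

    // collectable is a stand-in for the runtime's Collectable constraint:
    // anything that can hash itself and compare itself to another T.
    type collectable[T any] interface {
        Hash() int
        Equals(other T) bool
    }

    // objEqComparator delegates to the element's own Hash/Equals, the same
    // shape as ObjEqComparator in the hunk above.
    type objEqComparator[T collectable[T]] struct{}

    func (objEqComparator[T]) Hash1(o T) int       { return o.Hash() }
    func (objEqComparator[T]) Equals2(a, b T) bool { return a.Equals(b) }

    // point is a toy element type used to exercise the comparator.
    type point struct{ x, y int }

    func (p point) Hash() int           { return 31*p.x + p.y }
    func (p point) Equals(o point) bool { return p == o }

    func main() {
        var c objEqComparator[point]
        fmt.Println(c.Hash1(point{1, 2}), c.Equals2(point{1, 2}, point{1, 2}))
    }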
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
similarity index 76%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
index 6b63eb158..bfd43e1f7 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
@@ -4,8 +4,6 @@
package antlr
-// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
-// reach and the transitions between them.
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -14,9 +12,10 @@ type DFA struct {
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
- // good, but the DFAState is an object and can't be used directly as the key as it can in say Java
+ // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
// amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
- // to see if they really are the same object. Hence, we have our own map storage.
+ // to see if they really are the same object.
+ //
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
@@ -33,11 +32,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
- states: nil, // Lazy initialize
+ states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
- dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
+ dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
@@ -96,11 +95,12 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
- d.states = nil // Lazy initialize
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
d.numstates = 0
if precedenceDfa {
- precedenceState := NewDFAState(-1, NewATNConfigSet(false))
+ precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
@@ -113,31 +113,6 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
}
}
-// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
-// instantiation of the states JMap.
-func (d *DFA) Len() int {
- if d.states == nil {
- return 0
- }
- return d.states.Len()
-}
-
-// Get returns a state that matches s if it is present in the DFA state set. We defer to this
-// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
-func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
- if d.states == nil {
- return nil, false
- }
- return d.states.Get(s)
-}
-
-func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
- if d.states == nil {
- d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
- }
- return d.states.Put(s)
-}
-
func (d *DFA) getS0() *DFAState {
return d.s0
}
@@ -146,11 +121,9 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
-// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
+// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
- if d.states == nil {
- return []*DFAState{}
- }
+
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
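
The dfa.go hunks above trade lazy initialization of the states store (the removed Len/Get/Put wrappers that allocate on first use) for eager allocation in NewDFA and setPrecedenceDfa. A small sketch of the two approaches, using a plain map in place of the runtime's JStore:

    package main

    import "fmt"

    // dfa is a toy stand-in: states is nil until first use in the lazy
    // variant, and allocated up front in the eager one.
    type dfa struct {
        states map[int]string
    }

    // putLazy allocates on first insertion, as the removed DFA.Put did.
    func (d *dfa) putLazy(k int, v string) {
        if d.states == nil {
            d.states = make(map[int]string)
        }
        d.states[k] = v
    }

    // lenLazy tolerates a nil store, as the removed DFA.Len did.
    func (d *dfa) lenLazy() int {
        if d.states == nil {
            return 0
        }
        return len(d.states)
    }

    // newDFAEager allocates immediately, matching the restored NewDFA: simpler
    // call sites, at the cost of an allocation even for decisions that never
    // add a state.
    func newDFAEager() *dfa {
        return &dfa{states: make(map[int]string)}
    }

    func main() {
        lazy := &dfa{}
        lazy.putLazy(1, "s1")
        fmt.Println(lazy.lenLazy(), len(newDFAEager().states))
    }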
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
similarity index 97%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
index 0e1100989..84d0a31e5 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
@@ -10,7 +10,7 @@ import (
"strings"
)
-// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
dfa *DFA
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
similarity index 81%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
index 654143074..c90dec55c 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
@@ -22,31 +22,30 @@ func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
-// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
-// after reading input a1, a2,..an, the DFA is in a state that represents the
+// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
-// state along some path labeled a1a2..an."
-//
-// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
-// states the [ATN] could be in. We need to track the alt predicted by each state
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
-// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
-// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
-// A [DFAState] may have multiple references to a particular state, but with
-// different [ATN] contexts (with same or different alts) meaning that state was
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
- configs *ATNConfigSet
+ configs ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
@@ -54,7 +53,7 @@ type DFAState struct {
isAcceptState bool
- // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+ // prediction is the ttype we match or alt we predict if the state is accept.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
@@ -82,9 +81,9 @@ type DFAState struct {
predicates []*PredPrediction
}
-func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
if configs == nil {
- configs = NewATNConfigSet(false)
+ configs = NewBaseATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
@@ -95,7 +94,7 @@ func (d *DFAState) GetAltSet() []int {
var alts []int
if d.configs != nil {
- for _, c := range d.configs.configs {
+ for _, c := range d.configs.GetItems() {
alts = append(alts, c.GetAlt())
}
}
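
The DFAState comment above describes the classic subset idea: one DFA state stands for the set of ATN states (plus predicted alternatives and rule context) reachable after the input read so far. The toy automaton below, unrelated to the real ATN, shows just that set-of-states bookkeeping in isolation:

    package main

    import "fmt"

    // step sketches the subset idea: a single DFA "state" is the set of
    // automaton states reachable after the input so far. delta maps
    // (state, symbol) to successor states; the toy automaton is illustrative.
    func step(current map[int]bool, symbol rune, delta map[int]map[rune][]int) map[int]bool {
        next := map[int]bool{}
        for s := range current {
            for _, t := range delta[s][symbol] {
                next[t] = true
            }
        }
        return next
    }

    func main() {
        delta := map[int]map[rune][]int{
            0: {'a': {1, 2}},
            1: {'b': {3}},
            2: {'b': {4}},
        }
        cur := map[int]bool{0: true}
        for _, r := range "ab" {
            cur = step(cur, r, delta)
        }
        fmt.Println(cur) // the subset {3, 4} reachable after reading "ab"
    }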
diff --git a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
similarity index 92%
rename from vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
index bd2cd8bc3..c55bcc19b 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
@@ -33,7 +33,6 @@ type DiagnosticErrorListener struct {
exactOnly bool
}
-//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
@@ -43,7 +42,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
return n
}
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
if d.exactOnly && !exact {
return
}
@@ -56,7 +55,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -65,7 +64,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
@@ -97,12 +96,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
- for _, c := range set.configs {
+ for _, c := range set.GetItems() {
result.add(c.GetAlt())
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
similarity index 62%
rename from vendor/github.com/antlr4-go/antlr/v4/error_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
index 21a021643..f679f0dcd 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
@@ -16,29 +16,28 @@ import (
type ErrorListener interface {
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
- ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
- ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
- ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}
type DefaultErrorListener struct {
}
-//goland:noinspection GoUnusedExportedFunction
func NewDefaultErrorListener() *DefaultErrorListener {
return new(DefaultErrorListener)
}
-func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}
-func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}
-func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}
-func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}
type ConsoleErrorListener struct {
@@ -49,16 +48,21 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}
-// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
+// Provides a default instance of {@link ConsoleErrorListener}.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-// SyntaxError prints messages to System.err containing the
-// values of line, charPositionInLine, and msg using
-// the following format:
+// {@inheritDoc}
//
-// line :
-func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
- _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+//
+// This implementation prints messages to {@link System//err} containing the
+// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
+// the following format.
+//
+//
+// line line:charPositionInLine msg
+//
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
type ProxyErrorListener struct {
@@ -81,19 +85,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol
}
}
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
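
The listener hunks above change the configs parameter from *ATNConfigSet back to the ATNConfigSet interface; how callers plug a listener in is unchanged. A typical pattern, sketched against the import path this patch restores (only SyntaxError is overridden; the embedded DefaultErrorListener supplies the remaining no-op callbacks):

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    // verboseListener embeds the runtime's DefaultErrorListener so it only
    // needs to override the callback it cares about.
    type verboseListener struct {
        *antlr.DefaultErrorListener
    }

    func (l *verboseListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
        line, column int, msg string, e antlr.RecognitionException) {
        fmt.Printf("line %d:%d %s\n", line, column, msg)
    }

    func main() {
        // With a generated parser p, the usual wiring is:
        //   p.RemoveErrorListeners()
        //   p.AddErrorListener(&verboseListener{antlr.NewDefaultErrorListener()})
        _ = &verboseListener{antlr.NewDefaultErrorListener()}
    }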
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
similarity index 58%
rename from vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
index 9db2be1c7..5c0a637ba 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
@@ -21,8 +21,8 @@ type ErrorStrategy interface {
ReportMatch(Parser)
}
-// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
-// error reporting and recovery in ANTLR parsers.
+// This is the default implementation of {@link ANTLRErrorStrategy} used for
+// error Reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
@@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy {
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
- // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
@@ -62,37 +62,50 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
-func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+//
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
-func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
-func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+//
+// @param recognizer
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}
-// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+// {@inheritDoc}
+//
+// The default implementation simply calls {@link //endErrorCondition}.
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
d.endErrorCondition(recognizer)
}
-// ReportError is the default implementation of error reporting.
-// It returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls [beginErrorCondition]
-// and dispatches the Reporting task based on the runtime type of e
-// according to the following table.
-//
-// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
-// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
-// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
-// All other types : Calls [NotifyErrorListeners] to Report the exception
+// {@inheritDoc}
+//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table.
+//
+//
+// - {@link NoViableAltException}: Dispatches the call to
+// {@link //ReportNoViableAlternative}
+// - {@link InputMisMatchException}: Dispatches the call to
+// {@link //ReportInputMisMatch}
+// - {@link FailedPredicateException}: Dispatches the call to
+// {@link //ReportFailedPredicate}
+// - All other types: calls {@link Parser//NotifyErrorListeners} to Report
+// the exception
+//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
@@ -115,10 +128,12 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
}
}
-// Recover is the default recovery implementation.
-// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
-// loosely the set of tokens that can follow the current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+// {@inheritDoc}
+//
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
@@ -133,58 +148,54 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException
d.lastErrorStates = NewIntervalSet()
}
d.lastErrorStates.addOne(recognizer.GetState())
- followSet := d.GetErrorRecoverySet(recognizer)
+ followSet := d.getErrorRecoverySet(recognizer)
d.consumeUntil(recognizer, followSet)
}
-// Sync is the default implementation of error strategy synchronization.
-//
-// This Sync makes sure that the current lookahead symbol is consistent with what were expecting
-// at this point in the [ATN]. You can call this anytime but ANTLR only
-// generates code to check before sub-rules/loops and each iteration.
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what were expecting
+// at d point in the ATN. You can call d anytime but ANTLR only
+// generates code to check before subrules/loops and each iteration.
//
-// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
-// sub-rules. E.g.:
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
//
-// At the start of a sub-rule upon error, Sync performs single
+// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
-// If the sub-rule is optional
-//
-// ({@code (...)?}, {@code (...)*},
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
//
-// or a block with an empty alternative), then the expected set includes what follows
-// the sub-rule.
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
+// ORIGINS
//
-// # Origins
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
+// Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule:
+// out of the entire rules surrounding the loop. So, for rule
//
-// classfunc : 'class' ID '{' member* '}'
+//
+// classfunc : 'class' ID '{' member* '}'
+//
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
-// This functionality cost a bit of effort because the parser has to
-// compare the token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding this method as empty:
-//
-// { }
-//
-// [Jim Idle]: https://github.com/jimidle
+// This functionality cost a little bit of effort because the parser has to
+// compare token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off d
+// functionality by simply overriding d method as a blank { }.
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.InErrorRecoveryMode(recognizer) {
@@ -206,21 +217,25 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
if d.SingleTokenDeletion(recognizer) != nil {
return
}
- recognizer.SetError(NewInputMisMatchException(recognizer))
+ panic(NewInputMisMatchException(recognizer))
case ATNStatePlusLoopBack, ATNStateStarLoopBack:
d.ReportUnwantedToken(recognizer)
expecting := NewIntervalSet()
expecting.addSet(recognizer.GetExpectedTokens())
- whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
default:
// do nothing if we can't identify the exact kind of ATN state
}
}
-// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
//
-// See also [ReportError]
+// @param recognizer the parser instance
+// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
@@ -237,38 +252,48 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}.
//
-// See also: [ReportError]
-func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
- msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { //nolint:staticcheck // old receiver name retained by the revert
+ msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}.
+//
+// @see //ReportError
//
-// See also: [ReportError]
+// @param recognizer the parser instance
+// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-// ReportUnwantedToken is called to report a syntax error that requires the removal
+// This method is called to Report a syntax error which requires the removal
// of a token from the input stream. At the time d method is called, the
-// erroneous symbol is the current LT(1) symbol and has not yet been
-// removed from the input stream. When this method returns,
-// recognizer is in error recovery mode.
+// erroneous symbol is current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When d method returns,
+// {@code recognizer} is in error recovery mode.
//
-// This method is called when singleTokenDeletion identifies
+// This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
+// input error.
//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls beginErrorCondition to
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
-// [NotifyErrorListeners]
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -282,18 +307,21 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
recognizer.NotifyErrorListeners(msg, t, nil)
}
-// ReportMissingToken is called to report a syntax error which requires the
-// insertion of a missing token into the input stream. At the time this
-// method is called, the missing token has not yet been inserted. When this
-// method returns, recognizer is in error recovery mode.
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time d
+// method is called, the missing token has not yet been inserted. When d
+// method returns, {@code recognizer} is in error recovery mode.
//
-// This method is called when singleTokenInsertion identifies
+// This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
+// input error.
//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls beginErrorCondition to
-// enter error recovery mode, followed by calling [NotifyErrorListeners]
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -306,48 +334,54 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
recognizer.NotifyErrorListeners(msg, t, nil)
}
-// The RecoverInline default implementation attempts to recover from the mismatched input
+// The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method panics with [InputMisMatchException}.
-// TODO: Not sure that panic() is the right thing to do here - JI
+// recovery attempt fails, d method panics an
+// {@link InputMisMatchException}.
//
-// # EXTRA TOKEN (single token deletion)
+// EXTRA TOKEN (single token deletion)
//
-// LA(1) is not what we are looking for. If LA(2) has the
-// right token, however, then assume LA(1) is some extra spurious
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
-// the LA(2) token) as the successful result of the Match operation.
+// the {@code LA(2)} token) as the successful result of the Match operation.
//
-// # This recovery strategy is implemented by singleTokenDeletion
+// This recovery strategy is implemented by {@link
+// //singleTokenDeletion}.
//
-// # MISSING TOKEN (single token insertion)
+// MISSING TOKEN (single token insertion)
//
-// If current token -at LA(1) - is consistent with what could come
-// after the expected LA(1) token, then assume the token is missing
-// and use the parser's [TokenFactory] to create it on the fly. The
-// “insertion” is performed by returning the created token as the successful
-// result of the Match operation.
+// If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
//
-// This recovery strategy is implemented by [SingleTokenInsertion].
+// This recovery strategy is implemented by {@link
+// //singleTokenInsertion}.
//
-// # Example
+// EXAMPLE
//
-// For example, Input i=(3 is clearly missing the ')'. When
-// the parser returns from the nested call to expr, it will have
-// call the chain:
+// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain:
//
-// stat → expr → atom
+//
+// stat -> expr -> atom
+//
//
-// and it will be trying to Match the ')' at this point in the
+// and it will be trying to Match the {@code ')'} at d point in the
// derivation:
//
-// : ID '=' '(' INT ')' ('+' atom)* ';'
-// ^
+//
+// => ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
//
-// The attempt to [Match] ')' will fail when it sees ';' and
-// call [RecoverInline]. To recover, it sees that LA(1)==';'
-// is in the set of tokens that can follow the ')' token reference
-// in rule atom. It can assume that you forgot the ')'.
+// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
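
RecoverInline, documented above, tries single-token deletion first: if the lookahead after the offending token (LA(2)) is something the parser expects, the offending LA(1) token is treated as spurious and skipped. A toy illustration of just that decision, with tokens as plain strings rather than the runtime's Token type:

    package main

    import "fmt"

    // singleTokenDeletion returns the position to resume from and whether the
    // current token was dropped. expected is the set the parser would accept
    // at this point; everything here is illustrative, not the runtime's API.
    func singleTokenDeletion(tokens []string, pos int, expected map[string]bool) (int, bool) {
        if pos+1 < len(tokens) && expected[tokens[pos+1]] {
            return pos + 1, true // "delete" tokens[pos] by skipping over it
        }
        return pos, false
    }

    func main() {
        tokens := []string{"ID", "ID", "=", "INT"} // a spurious second ID before '='
        next, dropped := singleTokenDeletion(tokens, 1, map[string]bool{"=": true})
        fmt.Println(next, dropped) // 2 true: parsing resumes at '='
    }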
@@ -362,24 +396,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
return d.GetMissingSymbol(recognizer)
}
// even that didn't work must panic the exception
- recognizer.SetError(NewInputMisMatchException(recognizer))
- return nil
+ panic(NewInputMisMatchException(recognizer))
}
-// SingleTokenInsertion implements the single-token insertion inline error recovery
-// strategy. It is called by [RecoverInline] if the single-token
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
-// This method determines whether single-token insertion is viable by
-// checking if the LA(1) input symbol could be successfully Matched
-// if it were instead the LA(2) symbol. If this method returns
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If d method returns
// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce this behavior.
+// token with the correct type to produce d behavior.
//
-// This func returns true if single-token insertion is a viable recovery
-// strategy for the current mismatched input.
+// @param recognizer the parser instance
+// @return {@code true} if single-token insertion is a viable recovery
+// strategy for the current mismatched input, otherwise {@code false}
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
@@ -397,21 +431,23 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
return false
}
-// SingleTokenDeletion implements the single-token deletion inline error recovery
-// strategy. It is called by [RecoverInline] to attempt to recover
+// This method implements the single-token deletion inline error recovery
+// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
-// recognizer will not be in error recovery mode since the
+// {@code recognizer} will not be in error recovery mode since the
// returned token was a successful Match.
//
-// If the single-token deletion is successful, this method calls
-// [ReportUnwantedToken] to Report the error, followed by
-// [Consume] to actually “delete” the extraneous token. Then,
-// before returning, [ReportMatch] is called to signal a successful
-// Match.
+// If the single-token deletion is successful, d method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning {@link //ReportMatch} is called to signal a successful
+// Match.
//
-// The func returns the successfully Matched [Token] instance if single-token
-// deletion successfully recovers from the mismatched input, otherwise nil.
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
@@ -431,28 +467,24 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
return nil
}
-// GetMissingSymbol conjures up a missing token during error recovery.
+// Conjure up a missing token during error recovery.
//
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
-// For example:
-//
-// x=ID {f($x)}.
-//
-// The action clearly assumes
+// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want we assume that
-// this token is missing, and we keep going. Because we
+// d token is missing and we keep going. Because we
// have to return some token to replace the missing token,
// we have to conjure one up. This method gives the user control
// over the tokens returned for missing tokens. Mostly,
// you will want to create something special for identifier
// tokens. For literals such as '{' and ',', the default
// action in the parser or tree parser works. It simply creates
-// a [CommonToken] of the appropriate type. The text will be the token name.
-// If you need to change which tokens must be created by the lexer,
-// override this method to create the appropriate tokens.
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override d method to create the appropriate tokens.
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
@@ -466,7 +498,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
if expectedTokenType > 0 && expectedTokenType < len(ln) {
tokenText = ""
} else {
- tokenText = "" // TODO: matches the JS impl
+ tokenText = "" // TODO matches the JS impl
}
}
current := currentSymbol
@@ -484,13 +516,13 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
return recognizer.GetExpectedTokens()
}
-// GetTokenErrorDisplay determines how a token should be displayed in an error message.
-// The default is to display just the text, but during development you might
-// want to have a lot of information spit out. Override this func in that case
-// to use t.String() (which, for [CommonToken], dumps everything about
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
-// so that it creates a new type.
+// so that it creates a new Java type.
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
@@ -513,57 +545,52 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
return "'" + s + "'"
}
-// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// Compute the error recovery set for the current rule. During
// rule invocation, the parser pushes the set of tokens that can
-// follow that rule reference on the stack. This amounts to
+// follow that rule reference on the stack d amounts to
// computing FIRST of what follows the rule reference in the
// enclosing rule. See LinearApproximator.FIRST().
-//
// This local follow set only includes tokens
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
-// # Example
+// # EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
// thing to do is to consume tokens until you see something that
-// can legally follow a call to r or any rule that called r.
+// can legally follow a call to r//or* any rule that called r.
// You don't want the exact set of viable next tokens because the
// input might just be missing a token--you might consume the
// rest of the input looking for one of the missing tokens.
//
-// Consider the grammar:
-//
-// a : '[' b ']'
-// | '(' b ')'
-// ;
+// Consider grammar:
//
-// b : c '^' INT
-// ;
+// a : '[' b ']'
+// | '(' b ')'
//
-// c : ID
-// | INT
-// ;
+// b : c '^' INT
+// c : ID
+// | INT
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
//
-// FOLLOW(b1_in_a) = FIRST(']') = ']'
-// FOLLOW(b2_in_a) = FIRST(')') = ')'
-// FOLLOW(c_in_b) = FIRST('^') = '^'
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
//
-// Upon erroneous input “[]”, the call chain is
+// Upon erroneous input "[]", the call chain is
//
-// a → b → c
+// a -> b -> c
//
// and, hence, the follow context stack is:
//
-// Depth Follow set Start of rule execution
-// 0 a (from main())
-// 1 ']' b
-// 2 '^' c
+// depth follow set start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
//
// Notice that ')' is not included, because b would have to have
// been called from a different context in rule a for ')' to be
@@ -571,14 +598,11 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
-// all context-sensitive FOLLOW sets - the set of all tokens that
+// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
-// Sync to context-sensitive FOLLOWs for a, b, and c:
-//
-// {']','^'}
-//
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
@@ -596,19 +620,22 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
-// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
-// [A note on error recovery in recursive descent parsers].
+// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+//
+// and
+//
+// "A note on error recovery in recursive descent parsers":
+// http://portal.acm.org/citation.cfm?id=947902.947905
//
-// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
-// Parsers]
+// Later, Josef Grosch had some good ideas:
//
-// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
-// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
+// "Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers":
+// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
-// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
-// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
-// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined
+// at run-time upon error to avoid overhead during parsing.
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
recoverSet := NewIntervalSet()
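The union-then-consume idea behind getErrorRecoverySet and consumeUntil can be pictured with a small, self-contained sketch. Everything below (the tokenSet type and the literal follow sets) is illustrative only and stands in for the runtime's IntervalSet; it is not the runtime's code.

```go
package main

import "fmt"

// tokenSet is an illustrative stand-in for the runtime's IntervalSet.
type tokenSet map[rune]bool

// combined unions every follow set on the rule-invocation stack, which is
// what walking the parser's context chain in getErrorRecoverySet amounts to.
func combined(followStack []tokenSet) tokenSet {
	all := tokenSet{}
	for _, fs := range followStack {
		for t := range fs {
			all[t] = true
		}
	}
	return all
}

func main() {
	// For input "[]" and the grammar in the comment above, the stack holds
	// FOLLOW(b1_in_a) = {']'} and FOLLOW(c_in_b) = {'^'}.
	recovery := combined([]tokenSet{{']': true}, {'^': true}})

	input := []rune("[]")
	pos := 1 // '[' has been consumed; LA(1) is ']'
	for pos < len(input) && !recovery[input[pos]] {
		pos++ // consumeUntil: drop tokens until LA(1) is in the recovery set
	}
	if pos < len(input) {
		fmt.Printf("resynchronized at %q\n", input[pos]) // ']': nothing was dropped
	}
}
```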
@@ -633,36 +660,40 @@ func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet)
}
}
-// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+//
+// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// by immediately canceling the parse operation with a
-// [ParseCancellationException]. The implementation ensures that the
-// [ParserRuleContext//exception] field is set for all parse tree nodes
+// {@link ParseCancellationException}. The implementation ensures that the
+// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
-// This error strategy is useful in the following scenarios.
-//
-// - Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of [BailErrorStrategy.Sync] improves the performance of
-// the first stage.
+//
+// This error strategy is useful in the following scenarios.
//
-// - Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the [BailErrorStrategy] avoids wasting work on recovering from errors
-// when the result will be ignored either way.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of {@link BailErrorStrategy//Sync} improves the performance of
+// the first stage.
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
//
-// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
//
-// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
+// @see Parser//setErrorHandler(ANTLRErrorStrategy)
+
type BailErrorStrategy struct {
*DefaultErrorStrategy
}
var _ ErrorStrategy = &BailErrorStrategy{}
-//goland:noinspection GoUnusedExportedFunction
func NewBailErrorStrategy() *BailErrorStrategy {
b := new(BailErrorStrategy)
@@ -672,10 +703,10 @@ func NewBailErrorStrategy() *BailErrorStrategy {
return b
}
-// Recover Instead of recovering from exception e, re-panic it wrapped
-// in a [ParseCancellationException] so it is not caught by the
-// rule func catches. Use Exception.GetCause() to get the
-// original [RecognitionException].
+// Instead of recovering from exception {@code e}, re-panic it wrapped
+// in a {@link ParseCancellationException} so it is not caught by the
+// rule func catches. Use {@link Exception//getCause()} to get the
+// original {@link RecognitionException}.
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
@@ -686,10 +717,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context = nil
}
}
- recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+ panic(NewParseCancellationException()) // TODO we don't emit e properly
}
-// RecoverInline makes sure we don't attempt to recover inline if the parser
+// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
@@ -697,6 +728,7 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
return nil
}
-// Sync makes sure we don't attempt to recover from problems in sub-rules.
-func (b *BailErrorStrategy) Sync(_ Parser) {
+// Make sure we don't attempt to recover from problems in subrules.
+func (b *BailErrorStrategy) Sync(recognizer Parser) {
+ // pass
}
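As a usage note for the bail strategy above, the two-stage pattern its comment describes is usually wired up as in the sketch below. NewMyLexer, NewMyParser and the Expr start rule are hypothetical generated names used only to show the shape; SetErrorHandler, NewBailErrorStrategy, NewDefaultErrorStrategy and the recover-from-panic handling of the ParseCancellationException are the parts that reflect this runtime.

```go
package demo

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// parseTwoStage tries a fast, bail-on-first-error parse and only falls back
// to full error recovery when that attempt is cancelled.
func parseTwoStage(source string) (antlr.ParseTree, error) {
	parse := func(bail bool) (tree antlr.ParseTree, err error) {
		defer func() {
			if r := recover(); r != nil {
				// BailErrorStrategy.Recover panics with a ParseCancellationException.
				err = fmt.Errorf("parse cancelled: %v", r)
			}
		}()
		lexer := NewMyLexer(antlr.NewInputStream(source))      // hypothetical generated lexer
		p := NewMyParser(antlr.NewCommonTokenStream(lexer, 0)) // hypothetical generated parser
		if bail {
			p.SetErrorHandler(antlr.NewBailErrorStrategy())
		} else {
			p.SetErrorHandler(antlr.NewDefaultErrorStrategy())
		}
		return p.Expr(), nil // hypothetical start rule
	}

	// Stage 1: bail out on the first syntax error, doing no recovery work.
	if tree, err := parse(true); err == nil {
		return tree, nil
	}
	// Stage 2: reparse with full error recovery and reporting.
	return parse(false)
}
```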
diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
similarity index 73%
rename from vendor/github.com/antlr4-go/antlr/v4/errors.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
index 8f0f2f601..3954c1378 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/errors.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
@@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
// } else {
// stack := NewError().stack
// }
- // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+ // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
t := new(BaseRecognitionException)
@@ -43,17 +43,15 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
t.recognizer = recognizer
t.input = input
t.ctx = ctx
-
- // The current Token when an error occurred. Since not all streams
+ // The current {@link Token} when an error occurred. Since not all streams
// support accessing symbols by index, we have to track the {@link Token}
// instance itself.
- //
t.offendingToken = nil
-
// Get the ATN state number the parser was in at the time the error
- // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
- // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
- //
+ // occurred. For {@link NoViableAltException} and
+ // {@link LexerNoViableAltException} exceptions, this is the
+ // {@link DecisionState} number. For others, it is the state whose outgoing
+ // edge we couldn't Match.
t.offendingState = -1
if t.recognizer != nil {
t.offendingState = t.recognizer.GetState()
@@ -76,15 +74,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {
// If the state number is not known, b method returns -1.
-// getExpectedTokens gets the set of input symbols which could potentially follow the
-// previously Matched symbol at the time this exception was raised.
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
//
-// If the set of expected tokens is not known and could not be computed,
-// this method returns nil.
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
//
-// The func returns the set of token types that could potentially follow the current
-// state in the {ATN}, or nil if the information is not available.
-
+// @return The set of token types that could potentially follow the current
+// state in the ATN, or {@code nil} if the information is not available.
+// /
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
if b.recognizer != nil {
return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
@@ -101,10 +99,10 @@ type LexerNoViableAltException struct {
*BaseRecognitionException
startIndex int
- deadEndConfigs *ATNConfigSet
+ deadEndConfigs ATNConfigSet
}
-func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
l := new(LexerNoViableAltException)
@@ -130,16 +128,14 @@ type NoViableAltException struct {
startToken Token
offendingToken Token
ctx ParserRuleContext
- deadEndConfigs *ATNConfigSet
+ deadEndConfigs ATNConfigSet
}
-// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// Indicates that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
-// in the various paths when the error.
-//
-// Reported by [ReportNoViableAlternative]
-func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+// in the various paths when the error occurred. Reported by ReportNoViableAlternative()
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
if ctx == nil {
ctx = recognizer.GetParserRuleContext()
@@ -161,14 +157,12 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To
n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.Index() that couldn't Match
- // input.LT(1)
+ // input.LT(1)?//
n.deadEndConfigs = deadEndConfigs
-
// The token object at the start index the input stream might
- // not be buffering tokens so get a reference to it.
- //
- // At the time the error occurred, of course the stream needs to keep a
- // buffer of all the tokens, but later we might not have access to those.
+ // not be buffering tokens so get a reference to it. (At the
+ // time the error occurred, of course the stream needs to keep a
+ // buffer all of the tokens but later we might not have access to those.)
n.startToken = startToken
n.offendingToken = offendingToken
@@ -179,7 +173,7 @@ type InputMisMatchException struct {
*BaseRecognitionException
}
-// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
+// This signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
@@ -192,10 +186,11 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
}
-// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.
+
type FailedPredicateException struct {
*BaseRecognitionException
@@ -204,7 +199,6 @@ type FailedPredicateException struct {
predicate string
}
-//goland:noinspection GoUnusedExportedFunction
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
f := new(FailedPredicateException)
@@ -237,21 +231,6 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri
type ParseCancellationException struct {
}
-func (p ParseCancellationException) GetOffendingToken() Token {
- //TODO implement me
- panic("implement me")
-}
-
-func (p ParseCancellationException) GetMessage() string {
- //TODO implement me
- panic("implement me")
-}
-
-func (p ParseCancellationException) GetInputStream() IntStream {
- //TODO implement me
- panic("implement me")
-}
-
func NewParseCancellationException() *ParseCancellationException {
// Error.call(this)
// Error.captureStackTrace(this, ParseCancellationException)
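The exception types above mostly reach application code through an error listener rather than being constructed directly. Below is a hedged sketch of such a listener: it embeds the runtime's DefaultErrorListener so only SyntaxError is overridden; the struct name, the counter and the printed format are illustrative. A recognizer would typically pick it up via its RemoveErrorListeners/AddErrorListener methods, and the final RecognitionException argument is one of the types defined in this file.

```go
package demo

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// reportingListener counts and prints syntax errors as they are reported.
type reportingListener struct {
	*antlr.DefaultErrorListener // no-op implementations of the other callbacks
	count int
}

func (r *reportingListener) SyntaxError(_ antlr.Recognizer, offendingSymbol interface{},
	line, column int, msg string, _ antlr.RecognitionException) {
	r.count++
	fmt.Printf("line %d:%d %s (near %v)\n", line, column, msg, offendingSymbol)
}
```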
diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
similarity index 52%
rename from vendor/github.com/antlr4-go/antlr/v4/file_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
index 5f65f809b..bd6ad5efe 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
@@ -5,7 +5,8 @@
package antlr
import (
- "bufio"
+ "bytes"
+ "io"
"os"
)
@@ -13,53 +14,34 @@ import (
// when you construct the object.
type FileStream struct {
- InputStream
+ *InputStream
+
filename string
}
-//goland:noinspection GoUnusedExportedFunction
func NewFileStream(fileName string) (*FileStream, error) {
+ buf := bytes.NewBuffer(nil)
+
f, err := os.Open(fileName)
if err != nil {
return nil, err
}
-
- defer func(f *os.File) {
- errF := f.Close()
- if errF != nil {
- }
- }(f)
-
- reader := bufio.NewReader(f)
- fInfo, err := f.Stat()
+ defer f.Close()
+ _, err = io.Copy(buf, f)
if err != nil {
return nil, err
}
- fs := &FileStream{
- InputStream: InputStream{
- index: 0,
- name: fileName,
- },
- filename: fileName,
- }
+ fs := new(FileStream)
- // Pre-build the buffer and read runes efficiently
- //
- fs.data = make([]rune, 0, fInfo.Size())
- for {
- r, _, err := reader.ReadRune()
- if err != nil {
- break
- }
- fs.data = append(fs.data, r)
- }
- fs.size = len(fs.data) // Size in runes
+ fs.filename = fileName
+ s := string(buf.Bytes())
+
+ fs.InputStream = NewInputStream(s)
- // All done.
- //
return fs, nil
+
}
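A short usage note for the restored implementation above: the whole file is read into memory and handed to NewInputStream, so a FileStream behaves exactly like an in-memory stream afterwards. The file path in this sketch is hypothetical.

```go
package main

import (
	"fmt"
	"log"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	// "input.expr" is a hypothetical path used only for illustration.
	fs, err := antlr.NewFileStream("input.expr")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fs.GetSourceName()) // reports the file name
	fmt.Println(fs.Size())          // size in runes, not bytes

	// Because FileStream embeds *InputStream, it is consumed the same way
	// as an in-memory stream.
	for fs.LA(1) != antlr.TokenEOF {
		fs.Consume()
	}
	fmt.Println(fs.Index()) // equals fs.Size() once the stream is exhausted
}
```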
func (f *FileStream) GetSourceName() string {
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
new file mode 100644
index 000000000..a8b889ced
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+func NewInputStream(data string) *InputStream {
+
+ is := new(InputStream)
+
+ is.name = ""
+ is.index = 0
+ is.data = []rune(data)
+ is.size = len(is.data) // number of runes
+
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// mark/release do nothing; we have the entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+ return string(is.data)
+}
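The LA offset convention in the new file above is easy to trip over: LA(1) is the current, not-yet-consumed rune, LA(-1) is the most recently consumed one, LA(0) is undefined and returns 0, and any read past either end returns EOF. A minimal sketch using only the functions defined above:

```go
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	is := antlr.NewInputStream("ab")

	fmt.Println(is.LA(1) == 'a') // true: LA(1) is the next, not-yet-consumed rune
	is.Consume()
	fmt.Println(is.LA(-1) == 'a')           // true: LA(-1) is the rune just consumed
	fmt.Println(is.LA(1) == 'b')            // true
	fmt.Println(is.LA(2) == antlr.TokenEOF) // true: a read past the end returns EOF

	fmt.Println(is.GetText(0, 1)) // "ab": GetText's stop index is inclusive
}
```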
diff --git a/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/int_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
similarity index 82%
rename from vendor/github.com/antlr4-go/antlr/v4/interval_set.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
index cc5066067..c1e155e81 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
@@ -14,21 +14,20 @@ type Interval struct {
Stop int
}
-// NewInterval creates a new interval with the given start and stop values.
-func NewInterval(start, stop int) Interval {
- return Interval{
- Start: start,
- Stop: stop,
- }
+/* stop is not included! */
+func NewInterval(start, stop int) *Interval {
+ i := new(Interval)
+
+ i.Start = start
+ i.Stop = stop
+ return i
}
-// Contains returns true if the given item is contained within the interval.
-func (i Interval) Contains(item int) bool {
+func (i *Interval) Contains(item int) bool {
return item >= i.Start && item < i.Stop
}
-// String generates a string representation of the interval.
-func (i Interval) String() string {
+func (i *Interval) String() string {
if i.Start == i.Stop-1 {
return strconv.Itoa(i.Start)
}
@@ -36,18 +35,15 @@ func (i Interval) String() string {
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
-// Length returns the length of the interval.
-func (i Interval) Length() int {
+func (i *Interval) length() int {
return i.Stop - i.Start
}
-// IntervalSet represents a collection of [Intervals], which may be read-only.
type IntervalSet struct {
- intervals []Interval
+ intervals []*Interval
readOnly bool
}
-// NewIntervalSet creates a new empty, writable, interval set.
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
@@ -58,20 +54,6 @@ func NewIntervalSet() *IntervalSet {
return i
}
-func (i *IntervalSet) Equals(other *IntervalSet) bool {
- if len(i.intervals) != len(other.intervals) {
- return false
- }
-
- for k, v := range i.intervals {
- if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
- return false
- }
- }
-
- return true
-}
-
func (i *IntervalSet) first() int {
if len(i.intervals) == 0 {
return TokenInvalidType
@@ -88,16 +70,16 @@ func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h+1))
}
-func (i *IntervalSet) addInterval(v Interval) {
+func (i *IntervalSet) addInterval(v *Interval) {
if i.intervals == nil {
- i.intervals = make([]Interval, 0)
+ i.intervals = make([]*Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
if v.Stop < interval.Start {
- i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
return
} else if v.Stop == interval.Start {
i.intervals[k].Start = v.Start
@@ -157,16 +139,16 @@ func (i *IntervalSet) contains(item int) bool {
}
func (i *IntervalSet) length() int {
- iLen := 0
+ len := 0
for _, v := range i.intervals {
- iLen += v.Length()
+ len += v.length()
}
- return iLen
+ return len
}
-func (i *IntervalSet) removeRange(v Interval) {
+func (i *IntervalSet) removeRange(v *Interval) {
if v.Start == v.Stop-1 {
i.removeOne(v.Start)
} else if i.intervals != nil {
@@ -180,7 +162,7 @@ func (i *IntervalSet) removeRange(v Interval) {
i.intervals[k] = NewInterval(ni.Start, v.Start)
x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
@@ -217,7 +199,7 @@ func (i *IntervalSet) removeOne(v int) {
x := NewInterval(ki.Start, v)
ki.Start = v + 1
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
}
}
@@ -241,7 +223,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}
-func (i *IntervalSet) GetIntervals() []Interval {
+func (i *IntervalSet) GetIntervals() []*Interval {
return i.intervals
}
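A brief usage sketch for the restored IntervalSet above, written as if inside the antlr package because the mutators (addRange, contains, length) are unexported. One asymmetry worth remembering: addRange(l, h) is inclusive of h (it stores NewInterval(l, h+1)), while an Interval's Stop itself is exclusive, which is why Contains tests item < i.Stop.

```go
package antlr

import "fmt"

// exampleIntervalSet is an in-package illustration, not runtime code.
func exampleIntervalSet() {
	s := NewIntervalSet()
	s.addRange('a', 'z') // inclusive of 'z': stored as the interval ['a', 'z'+1)

	fmt.Println(s.contains('m')) // true
	fmt.Println(s.contains('0')) // false
	fmt.Println(s.length())      // 26 runes covered

	for _, iv := range s.GetIntervals() {
		// Stop is exclusive, so 'z' is still inside this interval.
		fmt.Println(iv.Start, iv.Stop, iv.Contains('z')) // 97 123 true
	}
}
```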
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
new file mode 100644
index 000000000..e5a74f0c6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
@@ -0,0 +1,198 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "sort"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(value)
+
+ for _, v1 := range s.store[kh] {
+ if s.comparator.Equals2(value, v1) {
+ return v1, true
+ }
+ }
+ s.store[kh] = append(s.store[kh], value)
+ s.len++
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(key)
+
+ for _, v := range s.store[kh] {
+ if s.comparator.Equals2(key, v) {
+ return v, true
+ }
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
+
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ for _, v := range e {
+ vs = append(vs, v)
+ }
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
+ return &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) {
+ kh := m.comparator.Hash1(key)
+
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ m.len++
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+
+ var none V
+ kh := m.comparator.Hash1(key)
+ for _, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ return e.val, true
+ }
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return len(m.store)
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
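Since jcollect.go is new in this tree, a compact sketch of how the generic store is meant to be driven may help: the caller supplies a Comparator that provides hashing and equality, and Put reports whether an equal value was already interned, handing back the original instance when it was. The point type and its comparator below are illustrative only; the runtime uses ATN configurations and prediction contexts here.

```go
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// point and pointEq are illustrative stand-ins for the runtime's key types.
type point struct{ x, y int }

type pointEq struct{}

func (pointEq) Hash1(p *point) int       { return p.x*31 + p.y }
func (pointEq) Equals2(a, b *point) bool { return a.x == b.x && a.y == b.y }

func main() {
	store := antlr.NewJStore[*point, pointEq](pointEq{})

	first, existed := store.Put(&point{1, 2})
	fmt.Println(existed) // false: newly interned

	// A distinct-but-equal value comes back as the originally stored instance.
	again, existed := store.Put(&point{1, 2})
	fmt.Println(existed, again == first) // true true

	fmt.Println(store.Contains(&point{3, 4})) // false
	fmt.Println(store.Len())                  // 1
}
```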
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
similarity index 78%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
index 3c7896a91..6533f0516 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
@@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer {
// create a single token. NextToken will return l object after
// Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set l to the last token to be Matched or
- // something non nil so that the auto token emit mechanism will not
+ // something nonnil so that the auto token emit mechanism will not
// emit another token.
lexer.token = nil
@@ -111,7 +111,6 @@ const (
LexerSkip = -3
)
-//goland:noinspection GoUnusedConst
const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
@@ -119,7 +118,7 @@ const (
LexerMaxCharValue = 0x10FFFF
)
-func (b *BaseLexer) Reset() {
+func (b *BaseLexer) reset() {
// wack Lexer state variables
if b.input != nil {
b.input.Seek(0) // rewind the input
@@ -177,7 +176,7 @@ func (b *BaseLexer) safeMatch() (ret int) {
return b.Interpreter.Match(b.input, b.mode)
}
-// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream.
+// Return a token from this source, i.e., Match a token on the char stream.
func (b *BaseLexer) NextToken() Token {
if b.input == nil {
panic("NextToken requires a non-nil input stream.")
@@ -206,8 +205,9 @@ func (b *BaseLexer) NextToken() Token {
continueOuter := false
for {
b.thetype = TokenInvalidType
+ ttype := LexerSkip
- ttype := b.safeMatch()
+ ttype = b.safeMatch()
if b.input.LA(1) == TokenEOF {
b.hitEOF = true
@@ -234,11 +234,12 @@ func (b *BaseLexer) NextToken() Token {
}
}
-// Skip instructs the lexer to Skip creating a token for current lexer rule
-// and look for another token. [NextToken] knows to keep looking when
-// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
+// Instruct the lexer to Skip creating a token for current lexer rule
+// and look for another token. NextToken() knows to keep looking when
+// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
+// /
func (b *BaseLexer) Skip() {
b.thetype = LexerSkip
}
@@ -247,29 +248,23 @@ func (b *BaseLexer) More() {
b.thetype = LexerMore
}
-// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
-// will be in force.
func (b *BaseLexer) SetMode(m int) {
b.mode = m
}
-// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the
-// current lexer mode to the supplied mode m.
func (b *BaseLexer) PushMode(m int) {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
b.modeStack.Push(b.mode)
b.mode = m
}
-// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
-// return to.
func (b *BaseLexer) PopMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
}
i, _ := b.modeStack.Pop()
@@ -285,7 +280,7 @@ func (b *BaseLexer) inputStream() CharStream {
func (b *BaseLexer) SetInputStream(input CharStream) {
b.input = nil
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
- b.Reset()
+ b.reset()
b.input = input
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}
@@ -294,19 +289,20 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
return b.tokenFactorySourcePair
}
-// EmitToken by default does not support multiple emits per [NextToken] invocation
-// for efficiency reasons. Subclass and override this func, [NextToken],
-// and [GetToken] (to push tokens into a list and pull from that list
-// rather than a single variable as this implementation does).
+// By default does not support multiple emits per NextToken invocation
+// for efficiency reasons. Subclass and override this method, NextToken,
+// and GetToken (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+// /
func (b *BaseLexer) EmitToken(token Token) {
b.token = token
}
-// Emit is the standard method called to automatically emit a token at the
+// The standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
-// use that to set the token's text. Override this method to emit
-// custom [Token] objects or provide a new factory.
+// use that to set the token's text. Override this method to emit
+// custom Token objects or provide a new factory.
// /
func (b *BaseLexer) Emit() Token {
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
@@ -314,7 +310,6 @@ func (b *BaseLexer) Emit() Token {
return t
}
-// EmitEOF emits an EOF token. By default, this is the last token emitted
func (b *BaseLexer) EmitEOF() Token {
cpos := b.GetCharPositionInLine()
lpos := b.GetLine()
@@ -323,7 +318,6 @@ func (b *BaseLexer) EmitEOF() Token {
return eof
}
-// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
func (b *BaseLexer) GetCharPositionInLine() int {
return b.Interpreter.GetCharPositionInLine()
}
@@ -340,12 +334,13 @@ func (b *BaseLexer) SetType(t int) {
b.thetype = t
}
-// GetCharIndex returns the index of the current character of lookahead
+// What is the index of the current character of lookahead?
func (b *BaseLexer) GetCharIndex() int {
return b.input.Index()
}
-// GetText returns the text Matched so far for the current token or any text override.
+// Return the text Matched so far for the current token or any text override.
+// Set the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
@@ -354,20 +349,17 @@ func (b *BaseLexer) GetText() string {
return b.Interpreter.GetText(b.input)
}
-// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
b.text = text
}
-// GetATN returns the ATN used by the lexer.
func (b *BaseLexer) GetATN() *ATN {
return b.Interpreter.ATN()
}
-// GetAllTokens returns a list of all [Token] objects in input char stream.
-// Forces a load of all tokens that can be made from the input char stream.
-//
-// Does not include EOF token.
+// Return a list of all Token objects in input char stream.
+// Forces load of all tokens. Does not include EOF token.
+// /
func (b *BaseLexer) GetAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
@@ -406,13 +398,11 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string {
return "'" + b.getErrorDisplayForChar(c) + "'"
}
-// Recover can normally Match any char in its vocabulary after Matching
-// a token, so here we do the easy thing and just kill a character and hope
+// Lexers can normally Match any char in its vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
-//
-// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
-// a character that makes no sense to the recognizer.
+// /
func (b *BaseLexer) Recover(re RecognitionException) {
if b.input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
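As a usage note for the lexer surface in this file, NextToken drives all of it: each call runs the ATN/DFA machinery, applies any Skip/More/mode commands, and returns the next token, with an EOF token emitted at the end of input. In the sketch below, NewMyLexer is a hypothetical generated lexer; the loop and the Token accessors are this runtime's API.

```go
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("a + b") // illustrative sample text
	lexer := NewMyLexer(input)             // hypothetical generated lexer

	for {
		tok := lexer.NextToken()
		if tok.GetTokenType() == antlr.TokenEOF {
			break
		}
		fmt.Printf("type %d %q at %d:%d\n",
			tok.GetTokenType(), tok.GetText(), tok.GetLine(), tok.GetColumn())
	}
}
```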
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
similarity index 78%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
index eaa7393e0..111656c29 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
@@ -7,29 +7,14 @@ package antlr
import "strconv"
const (
- // LexerActionTypeChannel represents a [LexerChannelAction] action.
- LexerActionTypeChannel = 0
-
- // LexerActionTypeCustom represents a [LexerCustomAction] action.
- LexerActionTypeCustom = 1
-
- // LexerActionTypeMode represents a [LexerModeAction] action.
- LexerActionTypeMode = 2
-
- // LexerActionTypeMore represents a [LexerMoreAction] action.
- LexerActionTypeMore = 3
-
- // LexerActionTypePopMode represents a [LexerPopModeAction] action.
- LexerActionTypePopMode = 4
-
- // LexerActionTypePushMode represents a [LexerPushModeAction] action.
- LexerActionTypePushMode = 5
-
- // LexerActionTypeSkip represents a [LexerSkipAction] action.
- LexerActionTypeSkip = 6
-
- // LexerActionTypeType represents a [LexerTypeAction] action.
- LexerActionTypeType = 7
+ LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
+ LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
+ LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
+ LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
+ LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
+ LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
+ LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
+ LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
)
type LexerAction interface {
@@ -54,7 +39,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction {
return la
}
-func (b *BaseLexerAction) execute(_ Lexer) {
+func (b *BaseLexerAction) execute(lexer Lexer) {
panic("Not implemented")
}
@@ -67,19 +52,17 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
}
func (b *BaseLexerAction) Hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, b.actionType)
- return murmurFinish(h, 1)
+ return b.actionType
}
func (b *BaseLexerAction) Equals(other LexerAction) bool {
- return b.actionType == other.getActionType()
+ return b == other
}
-// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
-// The Skip command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE].
+// The {@code Skip} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerSkipAction struct {
*BaseLexerAction
}
@@ -90,22 +73,17 @@ func NewLexerSkipAction() *LexerSkipAction {
return la
}
-// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
+// Provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}
-// String returns a string representation of the current [LexerSkipAction].
func (l *LexerSkipAction) String() string {
return "skip"
}
-func (b *LexerSkipAction) Equals(other LexerAction) bool {
- return other.getActionType() == LexerActionTypeSkip
-}
-
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
//
// with the assigned type.
@@ -147,10 +125,11 @@ func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}
-// LexerPushModeAction implements the pushMode lexer action by calling
-// [Lexer.pushMode] with the assigned mode.
+// Implements the {@code pushMode} lexer action by calling
+// {@link Lexer//pushMode} with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction
+
mode int
}
@@ -190,10 +169,10 @@ func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}
-// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
+// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
//
-// The popMode command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
+// The {@code popMode} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerPopModeAction struct {
*BaseLexerAction
}
@@ -245,10 +224,11 @@ func (l *LexerMoreAction) String() string {
return "more"
}
-// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
+// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction
+
mode int
}
@@ -342,19 +322,16 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool {
}
}
-// LexerChannelAction implements the channel lexer action by calling
-// [Lexer.setChannel] with the assigned channel.
-//
-// Constructs a new channel action with the specified channel value.
+// Implements the {@code channel} lexer action by calling
+// {@link Lexer//setChannel} with the assigned channel.
+// Constructs a new {@code channel} action with the specified channel value.
+// @param channel The channel value to pass to {@link Lexer//setChannel}.
type LexerChannelAction struct {
*BaseLexerAction
+
channel int
}
-// NewLexerChannelAction creates a channel lexer action by calling
-// [Lexer.setChannel] with the assigned channel.
-//
-// Constructs a new channel action with the specified channel value.
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
@@ -398,22 +375,25 @@ func (l *LexerChannelAction) String() string {
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+// Constructs a new indexed custom action by associating a character offset
+// with a {@link LexerAction}.
+//
+// Note: This class is only required for lexer actions for which
+// {@link LexerAction//isPositionDependent} returns {@code true}.
+//
+// @param offset The offset into the input {@link CharStream}, relative to
+// the token start index, at which the specified lexer action should be
+// executed.
+// @param action The lexer action to execute at a particular offset in the
+// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction
+
offset int
lexerAction LexerAction
isPositionDependent bool
}
-// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
-// with a [LexerAction].
-//
-// Note: This class is only required for lexer actions for which
-// [LexerAction.isPositionDependent] returns true.
-//
-// The offset points into the input [CharStream], relative to
-// the token start index, at which the specified lexerAction should be
-// executed.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
l := new(LexerIndexedCustomAction)
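To tie the action types in this file back to grammar syntax: a parameterless lexer command such as `-> skip` compiles to the shared singleton (LexerSkipActionINSTANCE), while parameterized commands get their own instances. The grammar fragments in the comments and the pushMode index below are made up for the example; the constructors and singletons are this runtime's.

```go
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	// Illustrative mapping from lexer rule commands to actions:
	//   WS      : [ \t\r\n]+ -> skip ;              => LexerSkipActionINSTANCE
	//   COMMENT : '//' ~[\r\n]* -> channel(HIDDEN) ; => NewLexerChannelAction(TokenHiddenChannel)
	//   LBRACE  : '{' -> pushMode(1) ;               => NewLexerPushModeAction(1)
	actions := []antlr.LexerAction{
		antlr.LexerSkipActionINSTANCE,
		antlr.NewLexerChannelAction(antlr.TokenHiddenChannel),
		antlr.NewLexerPushModeAction(1),
	}
	for _, a := range actions {
		fmt.Println(a) // skip, channel(1), pushMode(1)
	}
}
```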
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
similarity index 70%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
index dfc28c32b..be1ba7a7e 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
@@ -29,20 +29,28 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
l.lexerActions = lexerActions
// Caches the result of {@link //hashCode} since the hash code is an element
- // of the performance-critical {@link ATNConfig//hashCode} operation.
- l.cachedHash = murmurInit(0)
+ // of the performance-critical {@link LexerATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(57)
for _, a := range lexerActions {
l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
}
- l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
return l
}
-// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
-// the input [LexerActionExecutor] followed by a specified
-// [LexerAction].
-// TODO: This does not match the Java code
+// Creates a {@link LexerActionExecutor} which executes the actions for
+// the input {@code lexerActionExecutor} followed by a specified
+// {@code lexerAction}.
+//
+// @param lexerActionExecutor The executor for actions already traversed by
+// the lexer while Matching a token within a particular
+// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// though it were an empty executor.
+// @param lexerAction The lexer action to execute after the actions
+// specified in {@code lexerActionExecutor}.
+//
+// @return A {@link LexerActionExecutor} for executing the combine actions
+// of {@code lexerActionExecutor} and {@code lexerAction}.
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
if lexerActionExecutor == nil {
return NewLexerActionExecutor([]LexerAction{lexerAction})
@@ -51,42 +59,47 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
}
-// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// Creates a {@link LexerActionExecutor} which encodes the current offset
// for position-dependent lexer actions.
//
-// Normally, when the executor encounters lexer actions where
-// [LexerAction.isPositionDependent] returns true, it calls
-// [IntStream.Seek] on the input [CharStream] to set the input
-// position to the end of the current token. This behavior provides
-// for efficient [DFA] representation of lexer actions which appear at the end
+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
+// characters.
//
-// Prior to traversing a Match transition in the [ATN], the current offset
+// Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the [DFA] representation of
+// the offsets relative to the token start index, the DFA representation of
// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same Length, regardless of their absolute
-// position in the input stream.
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns this instance.
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
//
-// The offset is assigned to all position-dependent
+// @param offset The current offset to assign to all position-dependent
// lexer actions which do not already have offsets assigned.
//
-// The func returns a [LexerActionExecutor] that stores input stream offsets
+// @return A {@link LexerActionExecutor} which stores input stream offsets
// for all position-dependent lexer actions.
+// /
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
var updatedLexerActions []LexerAction
for i := 0; i < len(l.lexerActions); i++ {
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
if l.lexerActions[i].getIsPositionDependent() && !ok {
if updatedLexerActions == nil {
- updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
- updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ updatedLexerActions = make([]LexerAction, 0)
+
+ for _, a := range l.lexerActions {
+ updatedLexerActions = append(updatedLexerActions, a)
+ }
}
+
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
}
}
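LexerActionExecutorappend above composes executors incrementally, and starting from nil behaves exactly like starting from an empty executor. A short hedged sketch of that composition; the particular actions chosen are arbitrary.

```go
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	// A nil executor is treated as empty by LexerActionExecutorappend.
	var exec *antlr.LexerActionExecutor
	exec = antlr.LexerActionExecutorappend(exec, antlr.LexerSkipActionINSTANCE)
	exec = antlr.LexerActionExecutorappend(exec, antlr.LexerMoreActionINSTANCE)
	fmt.Println(exec != nil) // true: both actions now run, in order, at an accept state
}
```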
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
similarity index 80%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
index fe938b025..c573b7521 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
@@ -10,8 +10,10 @@ import (
"strings"
)
-//goland:noinspection GoUnusedGlobalVariable
var (
+ LexerATNSimulatorDebug = false
+ LexerATNSimulatorDFADebug = false
+
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
@@ -30,11 +32,11 @@ type ILexerATNSimulator interface {
}
type LexerATNSimulator struct {
- BaseATNSimulator
+ *BaseATNSimulator
recog Lexer
predictionMode int
- mergeCache *JPCMap2
+ mergeCache DoubleDict
startIndex int
Line int
CharPositionInLine int
@@ -44,35 +46,27 @@ type LexerATNSimulator struct {
}
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
- l := &LexerATNSimulator{
- BaseATNSimulator: BaseATNSimulator{
- atn: atn,
- sharedContextCache: sharedContextCache,
- },
- }
+ l := new(LexerATNSimulator)
+
+ l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
l.decisionToDFA = decisionToDFA
l.recog = recog
-
// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In l case, we use the
// ATN-generated exception object.
l.startIndex = -1
-
- // line number 1..n within the input
+ // line number 1..n within the input///
l.Line = 1
-
// The index of the character relative to the beginning of the line
- // 0..n-1
+ // 0..n-1///
l.CharPositionInLine = 0
-
l.mode = LexerDefaultMode
-
// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
-
+ // done
return l
}
@@ -120,7 +114,7 @@ func (l *LexerATNSimulator) reset() {
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
@@ -132,7 +126,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
predict := l.execATN(input, next)
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
@@ -140,18 +134,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
- // allow zero-Length tokens
+ // allow zero-length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state
for { // while more work
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}
@@ -194,7 +188,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
}
}
t = input.LA(1)
- s = target // flip current DFA target becomes new src/from state
+ s = target // flip current DFA target becomes Newsrc/from state
}
return l.failOrAccept(l.prevAccept, input, s.configs, t)
@@ -220,39 +214,43 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
- if runtimeConfig.lexerATNSimulatorDebug && target != nil {
+ if LexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}
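The lookup above relies on a dense per-state edge array indexed by t - LexerATNSimulatorMinDFAEdge, where nil means "not yet computed" and a dedicated error state means "known dead end"; symbols outside the 0..127 range are never cached and always re-enter the ATN. The sketch below uses local stand-in types to illustrate that caching scheme; it is not the runtime's DFAState.

```go
package main

import "fmt"

// dfaState is a local stand-in for the runtime's DFAState and its edge table.
type dfaState struct {
	id    int
	edges []*dfaState // indexed by t - minDFAEdge
}

const (
	minDFAEdge = 0
	maxDFAEdge = 127 // anything outside this range stays in the ATN
)

// errorState plays the role of ATNSimulatorError: a cached "known dead end".
var errorState = &dfaState{id: -1}

// existingTarget mirrors getExistingTargetState: nil means "compute it".
func existingTarget(s *dfaState, t int) *dfaState {
	if s.edges == nil || t < minDFAEdge || t > maxDFAEdge {
		return nil
	}
	return s.edges[t-minDFAEdge]
}

// addEdge mirrors the edge-table bookkeeping done by addDFAEdge.
func addEdge(s *dfaState, t int, target *dfaState) {
	if t < minDFAEdge || t > maxDFAEdge {
		return
	}
	if s.edges == nil {
		s.edges = make([]*dfaState, maxDFAEdge-minDFAEdge+1)
	}
	s.edges[t-minDFAEdge] = target
}

func main() {
	s0, s1 := &dfaState{id: 0}, &dfaState{id: 1}
	fmt.Println(existingTarget(s0, 'a') == nil) // true: not computed yet
	addEdge(s0, 'a', s1)
	addEdge(s0, 'z', errorState)                                        // remember 'z' leads nowhere
	fmt.Println(existingTarget(s0, 'a').id, existingTarget(s0, 'z').id) // 1 -1
}
```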
-// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the
-// computed state and corresponding edge to the [DFA].
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
//
-// The func returns the computed target [DFA] state for the given input symbol t.
-// If this does not lead to a valid [DFA] state, this method
-// returns ATNSimulatorError.
+// @param input The input stream
+// @param s The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
- l.getReachableConfigSet(input, s.configs, reach, t)
+ l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't panic out l knowledge it'd
- // cause a fail-over from DFA later.
+ // cause a failover from DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
- return l.addDFAEdge(s, t, nil, reach)
+ return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}
-func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
@@ -267,35 +265,34 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}
-// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
-// we can reach upon input t.
-//
-// Parameter reach is a return parameter.
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
+// Given a starting configuration set, figure out all ATN configurations
+// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
// l is used to Skip processing for configs which have a lower priority
- // than a runtimeConfig that already reached an accept state for the same rule
+ // than a config that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber
- for _, cfg := range closure.configs {
- currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
- if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
+ for _, cfg := range closure.GetItems() {
+ currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
+ if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
continue
}
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
- fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
}
for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
- lexerActionExecutor := cfg.lexerActionExecutor
+ lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
- treatEOFAsEpsilon := t == TokenEOF
- config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
+ treatEOFAsEpsilon := (t == TokenEOF)
+ config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
// any remaining configs for l alt have a lower priority
@@ -308,7 +305,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATN
}
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Printf("ACTION %v\n", lexerActionExecutor)
}
// seek to after last char in token
@@ -328,7 +325,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState
return nil
}
-func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
@@ -339,24 +336,25 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATN
return configs
}
-// closure since the alternatives within any lexer decision are ordered by
-// preference, this method stops pursuing the closure as soon as an accept
+// Since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
-// search from runtimeConfig, all other (potentially reachable) states for
-// this rule would have a lower priority.
+// search from {@code config}, all other (potentially reachable) states for
+// this rule would have a lower priority.
//
-// The func returns true if an accept state is reached.
-func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
+// @return {@code true} if an accept state is reached, otherwise
+// {@code false}.
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
- if runtimeConfig.lexerATNSimulatorDebug {
- fmt.Println("closure(" + config.String() + ")")
+ if LexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
}
_, ok := config.state.(*RuleStopState)
if ok {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
@@ -403,10 +401,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs
}
// side-effect: can alter configs.hasSemanticContext
-func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
- configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
+ configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
- var cfg *ATNConfig
+ var cfg *LexerATNConfig
if trans.getSerializationType() == TransitionRULE {
@@ -437,10 +435,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig
pt := trans.(*PredicateTransition)
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
- configs.hasSemanticContext = true
+ configs.SetHasSemanticContext(true)
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
@@ -451,7 +449,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In this case, the config needs to be
+ // isEmpty() is false. In this case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
@@ -478,18 +476,26 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig
return cfg
}
-// evaluatePredicate eEvaluates a predicate specified in the lexer.
+// Evaluate a predicate specified in the lexer.
//
-// If speculative is true, this method was called before
-// [consume] for the Matched character. This method should call
-// [consume] before evaluating the predicate to ensure position
-// sensitive values, including [GetText], [GetLine],
-// and [GetColumn], properly reflect the current
-// lexer state. This method should restore input and the simulator
-// to the original state before returning, i.e. undo the actions made by the
-// call to [Consume].
+// If {@code speculative} is {@code true}, this method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//getcolumn}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}.
//
-// The func returns true if the specified predicate evaluates to true.
+// @param input The input stream.
+// @param ruleIndex The rule containing the predicate.
+// @param predIndex The index of the predicate within the rule.
+// @param speculative {@code true} if the current index in {@code input} is
+// one character before the predicate's location.
+//
+// @return {@code true} if the specified predicate evaluates to
+// {@code true}.
+// /
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
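The restored comment block above spells out the contract of speculative predicate evaluation: consume the character first so position-sensitive values are correct, then undo everything before returning. A minimal save/restore sketch of that pattern, using an invented cursor type rather than the real CharStream interface:

package main

import "fmt"

// cursor is a toy stand-in for an input stream with a seekable index.
type cursor struct {
	data []rune
	pos  int
}

func (c *cursor) index() int    { return c.pos }
func (c *cursor) seek(i int)    { c.pos = i }
func (c *cursor) consume()      { c.pos++ }
func (c *cursor) current() rune { return c.data[c.pos] }

// evalSpeculative consumes one symbol so pred sees the post-match position,
// then restores the original index no matter what pred returned.
func evalSpeculative(c *cursor, pred func(*cursor) bool) bool {
	saved := c.index()
	defer c.seek(saved) // undo the speculative consume before returning
	c.consume()
	return pred(c)
}

func main() {
	c := &cursor{data: []rune("ab")}
	ok := evalSpeculative(c, func(c *cursor) bool { return c.current() == 'b' })
	fmt.Println(ok, c.index()) // true 0 — the position was restored
}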
@@ -521,7 +527,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream
settings.dfaState = dfaState
}
-func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// leading to l call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes l edge
@@ -533,9 +539,10 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
- //
- suppressEdge := cfgs.hasSemanticContext
- cfgs.hasSemanticContext = false
+ // /
+ suppressEdge := cfgs.HasSemanticContext()
+ cfgs.SetHasSemanticContext(false)
+
to = l.addDFAState(cfgs, true)
if suppressEdge {
@@ -547,7 +554,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// Only track edges within the DFA bounds
return to
}
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
l.atn.edgeMu.Lock()
@@ -565,12 +572,13 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
-func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
proposed := NewDFAState(-1, configs)
- var firstConfigWithRuleStopState *ATNConfig
+ var firstConfigWithRuleStopState ATNConfig
+
+ for _, cfg := range configs.GetItems() {
- for _, cfg := range configs.configs {
_, ok := cfg.GetState().(*RuleStopState)
if ok {
@@ -580,14 +588,14 @@ func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
- proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
- existing, present := dfa.Get(proposed)
+ existing, present := dfa.states.Get(proposed)
if present {
// This state was already present, so just return it.
@@ -597,11 +605,10 @@ func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool
// We need to add the new state
//
- proposed.stateNumber = dfa.Len()
- configs.readOnly = true
- configs.configLookup = nil // Not needed now
+ proposed.stateNumber = dfa.states.Len()
+ configs.SetReadOnly(true)
proposed.configs = configs
- dfa.Put(proposed)
+ dfa.states.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
@@ -613,7 +620,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}
-// GetText returns the text [Match]ed so far for the current token.
+// Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
similarity index 73%
rename from vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
index 4955ac876..76689615a 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
@@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
return la
}
+// - Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+//
+// /
const (
- // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
- // a predicate during analysis if
- //
- // seeThruPreds==false
LL1AnalyzerHitPred = TokenInvalidType
)
@@ -38,12 +38,11 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
count := len(s.GetTransitions())
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
-
look[alt] = NewIntervalSet()
- lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
- la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
-
- // Wipe out lookahead for la alternative if we found nothing,
+ lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+ seeThruPreds := false // fail to get lookahead upon pred
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+ // Wipe out lookahead for la alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
look[alt] = nil
@@ -52,31 +51,32 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
return look
}
-// Look computes the set of tokens that can follow s in the [ATN] in the
-// specified ctx.
-//
-// If ctx is nil and the end of the rule containing
-// s is reached, [EPSILON] is added to the result set.
-//
-// If ctx is not nil and the end of the outermost rule is
-// reached, [EOF] is added to the result set.
+// *
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
//
-// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
-// [BlockEndState] to detect epsilon paths through a closure.
+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
//
-// Parameter ctx is the complete parser context, or nil if the context
+// @param s the ATN state
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or {@code nil} if the context
// should be ignored
//
-// The func returns the set of tokens that can follow s in the [ATN] in the
-// specified ctx.
+// @return The set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+// /
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
- var lookContext *PredictionContext
+ seeThruPreds := true // ignore preds get all lookahead
+ var lookContext PredictionContext
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
- la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
- NewBitSet(), true, true)
+ la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
return r
}
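Look, as documented in the hunk above, computes the set of tokens that can follow a state by chasing epsilon paths and adding EOF/EPSILON at rule boundaries. A much-simplified sketch of the underlying idea, collecting reachable terminal labels through epsilon transitions on a hand-rolled graph (nothing here is the ANTLR data model):

package main

import "fmt"

// edge is either an epsilon edge (token == 0) or a terminal edge.
type edge struct {
	token int // 0 means epsilon
	to    int
}

// follow collects every terminal token reachable from state s by first
// crossing zero or more epsilon edges, guarding against cycles with seen.
func follow(graph map[int][]edge, s int, seen map[int]bool) map[int]bool {
	out := map[int]bool{}
	if seen[s] {
		return out
	}
	seen[s] = true
	for _, e := range graph[s] {
		if e.token != 0 {
			out[e.token] = true // a real token can follow here
			continue
		}
		for t := range follow(graph, e.to, seen) { // epsilon: keep walking
			out[t] = true
		}
	}
	return out
}

func main() {
	g := map[int][]edge{
		1: {{0, 2}, {10, 3}}, // epsilon to state 2, token 10 to state 3
		2: {{20, 3}},         // token 20
	}
	fmt.Println(follow(g, 1, map[int]bool{})) // map[10:true 20:true]
}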
@@ -110,17 +110,16 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
-func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
- calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
- c := NewATNConfig6(s, 0, ctx)
+ c := NewBaseATNConfig6(s, 0, ctx)
if lookBusy.Contains(c) {
return
@@ -152,7 +151,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look
return
}
- if ctx.pcType != PredictionContextEmpty {
+ if ctx != BasePredictionContextEMPTY {
removed := calledRuleStack.contains(s.GetRuleIndex())
defer func() {
if removed {
@@ -203,8 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look
}
}
-func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
- calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
similarity index 80%
rename from vendor/github.com/antlr4-go/antlr/v4/parser.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
index fb57ac15d..d26bf0639 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
@@ -48,10 +48,8 @@ type BaseParser struct {
_SyntaxErrors int
}
-// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
-// recovery stuff.
-//
-//goland:noinspection GoUnusedExportedFunction
+// This contains all the parsing support code to embed in parsers; essentially
+// most of it is error recovery stuff.
func NewBaseParser(input TokenStream) *BaseParser {
p := new(BaseParser)
@@ -60,46 +58,39 @@ func NewBaseParser(input TokenStream) *BaseParser {
// The input stream.
p.input = nil
-
// The error handling strategy for the parser. The default value is a new
// instance of {@link DefaultErrorStrategy}.
p.errHandler = NewDefaultErrorStrategy()
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
-
- // The ParserRuleContext object for the currently executing rule.
+ // The {@link ParserRuleContext} object for the currently executing rule.
// p.is always non-nil during the parsing process.
p.ctx = nil
-
- // Specifies whether the parser should construct a parse tree during
+ // Specifies whether or not the parser should construct a parse tree during
// the parsing process. The default value is {@code true}.
p.BuildParseTrees = true
-
- // When setTrace(true) is called, a reference to the
- // TraceListener is stored here, so it can be easily removed in a
- // later call to setTrace(false). The listener itself is
+ // When {@link //setTrace}{@code (true)} is called, a reference to the
+ // {@link TraceListener} is stored here so it can be easily removed in a
+ // later call to {@link //setTrace}{@code (false)}. The listener itself is
// implemented as a parser listener so p.field is not directly used by
// other parser methods.
p.tracer = nil
-
- // The list of ParseTreeListener listeners registered to receive
+ // The list of {@link ParseTreeListener} listeners registered to receive
// events during the parse.
p.parseListeners = nil
-
// The number of syntax errors Reported during parsing. p.value is
- // incremented each time NotifyErrorListeners is called.
+ // incremented each time {@link //NotifyErrorListeners} is called.
p._SyntaxErrors = 0
p.SetInputStream(input)
return p
}
-// This field maps from the serialized ATN string to the deserialized [ATN] with
+// This field maps from the serialized ATN string to the deserialized {@link
+// ATN} with
// bypass alternatives.
//
-// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
-//
-//goland:noinspection GoUnusedGlobalVariable
+// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
@@ -152,13 +143,10 @@ func (p *BaseParser) Match(ttype int) Token {
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
- if p.HasError() {
- return nil
- }
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-
- // we must have conjured up a new token during single token
- // insertion if it's not the current symbol
+ // we must have conjured up a new token during single token
+ // insertion
+ // if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
@@ -190,8 +178,9 @@ func (p *BaseParser) MatchWildcard() Token {
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
- // we must have conjured up a new token during single token
- // insertion if it's not the current symbol
+ // we must have conjured up a new token during single token
+ // insertion
+ // if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
@@ -213,27 +202,33 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
return p.parseListeners
}
-// AddParseListener registers listener to receive events during the parsing process.
+// Registers {@code listener} to receive events during the parsing process.
//
-// To support output-preserving grammar transformations (including but not
+// To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
-// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+//
//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
+// @param listener the listener to add
//
-// - Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.
-// - Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.
-// - Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.
+// @panics nilPointerException if {@code listener} is {@code nil}
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
@@ -244,10 +239,11 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
p.parseListeners = append(p.parseListeners, listener)
}
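AddParseListener, per the comment restored above, appends the listener and panics on nil; the listeners then receive enter/exit events during the parse itself, with exit events walked in reverse order. The gist of that register-and-notify pattern, sketched generically (these interfaces are illustrative, not the antlr ParseTreeListener API):

package main

import "fmt"

// ruleListener is an illustrative stand-in for a parse listener.
type ruleListener interface {
	EnterRule(name string)
	ExitRule(name string)
}

type printer struct{}

func (printer) EnterRule(name string) { fmt.Println("enter", name) }
func (printer) ExitRule(name string)  { fmt.Println("exit", name) }

type parser struct{ listeners []ruleListener }

// addListener mirrors the add semantics above: nil is rejected loudly.
func (p *parser) addListener(l ruleListener) {
	if l == nil {
		panic("listener")
	}
	p.listeners = append(p.listeners, l)
}

// triggerEnter notifies in registration order; an exit trigger would walk
// the slice in reverse, as the TriggerExitRuleEvent comment above notes.
func (p *parser) triggerEnter(rule string) {
	for _, l := range p.listeners {
		l.EnterRule(rule)
	}
}

func main() {
	p := &parser{}
	p.addListener(printer{})
	p.triggerEnter("statement")
}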
-// RemoveParseListener removes listener from the list of parse listeners.
+// Remove {@code listener} from the list of parse listeners.
//
-// If listener is nil or has not been added as a parse
-// listener, this func does nothing.
+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.
+// @param listener the listener to remove
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
if p.parseListeners != nil {
@@ -278,7 +274,7 @@ func (p *BaseParser) removeParseListeners() {
p.parseListeners = nil
}
-// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+// Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
if p.parseListeners != nil {
ctx := p.ctx
@@ -289,7 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
}
}
-// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+// Notify any parse listeners of an exit rule event.
+//
+// @see //addParseListener
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
@@ -316,16 +314,19 @@ func (p *BaseParser) GetTokenFactory() TokenFactory {
return p.input.GetTokenSource().GetTokenFactory()
}
-// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+// Tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
p.input.GetTokenSource().setTokenFactory(factory)
}
-// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// The ATN with bypass alternatives is expensive to create so we create it
// lazily.
+//
+// @panics UnsupportedOperationException if the current parser does not
+// implement the {@link //getSerializedATN()} method.
func (p *BaseParser) GetATNWithBypassAlts() {
- // TODO - Implement this?
+ // TODO
panic("Not implemented!")
// serializedAtn := p.getSerializedATN()
@@ -353,7 +354,6 @@ func (p *BaseParser) GetATNWithBypassAlts() {
// String id = m.Get("ID")
//
-//goland:noinspection GoUnusedParameter
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
panic("NewParseTreePatternMatcher not implemented!")
@@ -386,16 +386,14 @@ func (p *BaseParser) GetTokenStream() TokenStream {
return p.input
}
-// SetTokenStream installs input as the token stream and resets the parser.
+// Set the token stream and reset the parser.//
func (p *BaseParser) SetTokenStream(input TokenStream) {
p.input = nil
p.reset()
p.input = input
}
-// GetCurrentToken returns the current token at LT(1).
-//
-// [Match] needs to return the current input symbol, which gets put
+// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
@@ -448,7 +446,7 @@ func (p *BaseParser) addContextToParseTree() {
}
}
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
p.SetState(state)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
@@ -476,7 +474,7 @@ func (p *BaseParser) ExitRule() {
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
localctx.SetAltNumber(altNum)
- // if we have a new localctx, make sure we replace existing ctx
+ // if we have a new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if p.BuildParseTrees && p.ctx != localctx {
if p.ctx.GetParent() != nil {
@@ -500,7 +498,7 @@ func (p *BaseParser) GetPrecedence() int {
return p.precedenceStack[len(p.precedenceStack)-1]
}
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
p.SetState(state)
p.precedenceStack.Push(precedence)
p.ctx = localctx
@@ -514,7 +512,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, pr
//
// Like {@link //EnterRule} but for recursive rules.
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
previous := p.ctx
previous.SetParent(localctx)
previous.SetInvokingState(state)
@@ -532,7 +530,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state,
}
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
- _, _ = p.precedenceStack.Pop()
+ p.precedenceStack.Pop()
p.ctx.SetStop(p.input.LT(-1))
retCtx := p.ctx // save current ctx (return value)
// unroll so ctx is as it was before call to recursive method
@@ -563,22 +561,29 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
return nil
}
-func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
-//goland:noinspection GoUnusedParameter
func (p *BaseParser) inContext(context ParserRuleContext) bool {
// TODO: useful in parser?
return false
}
-// IsExpectedToken checks whether symbol can follow the current state in the
-// {ATN}. The behavior of p.method is equivalent to the following, but is
+//
+// Checks whether or not {@code symbol} can follow the current state in the
+// ATN. The behavior of this method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
-// return getExpectedTokens().contains(symbol)
+//
+// return getExpectedTokens().contains(symbol)
+//
+//
+// @param symbol the symbol type to check
+// @return {@code true} if {@code symbol} can follow the current state in
+// the ATN, otherwise {@code false}.
+
func (p *BaseParser) IsExpectedToken(symbol int) bool {
atn := p.Interpreter.atn
ctx := p.ctx
@@ -606,9 +611,11 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
return false
}
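As the restored comment says, IsExpectedToken is behaviourally equivalent to getExpectedTokens().contains(symbol), just computed without materialising the complete follow set. A trivial illustration of that equivalence using a plain set (purely a sketch; the real method walks the ATN and the rule-invocation contexts instead):

package main

import "fmt"

// expectedTokens would be derived from the ATN state and context in the
// real parser; here it is a hard-coded set for illustration only.
func expectedTokens() map[int]bool {
	return map[int]bool{11: true, 42: true}
}

// isExpectedToken is the "contains" formulation from the comment above.
func isExpectedToken(symbol int) bool {
	return expectedTokens()[symbol]
}

func main() {
	fmt.Println(isExpectedToken(42)) // true
	fmt.Println(isExpectedToken(7))  // false
}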
-// GetExpectedTokens and returns the set of input symbols which could follow the current parser
-// state and context, as given by [GetState] and [GetContext],
+// Computes the set of input symbols which could follow the current parser
+// state and context, as given by {@link //GetState} and {@link //GetContext},
// respectively.
+//
+// @see ATN//getExpectedTokens(int, RuleContext)
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
@@ -619,7 +626,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
return atn.NextTokens(s, nil)
}
-// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found.
+// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
func (p *BaseParser) GetRuleIndex(ruleName string) int {
var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
if ok {
@@ -629,10 +636,13 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int {
return -1
}
-// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// Return List<String> of the rule names in your parser instance
// leading up to a call to the current rule. You could override if
// you want more details such as the file/line info of where
// in the ATN a rule is invoked.
+//
+// This is very useful for error messages.
+
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
if c == nil {
c = p.ctx
@@ -658,16 +668,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
return stack
}
-// GetDFAStrings returns a list of all DFA states used for debugging purposes
+// For debugging and other purposes.//
func (p *BaseParser) GetDFAStrings() string {
return fmt.Sprint(p.Interpreter.decisionToDFA)
}
-// DumpDFA prints the whole of the DFA for debugging
+// For debugging and other purposes.//
func (p *BaseParser) DumpDFA() {
seenOne := false
for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.Len() > 0 {
+ if dfa.states.Len() > 0 {
if seenOne {
fmt.Println()
}
@@ -682,10 +692,8 @@ func (p *BaseParser) GetSourceName() string {
return p.GrammarFileName
}
-// SetTrace installs a trace listener for the parse.
-//
-// During a parse it is sometimes useful to listen in on the rule entry and exit
-// events as well as token Matches. This is for quick and dirty debugging.
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
func (p *BaseParser) SetTrace(trace *TraceListener) {
if trace == nil {
p.RemoveParseListener(p.tracer)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
similarity index 64%
rename from vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
index ae2869692..8bcc46a0d 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
@@ -10,51 +10,31 @@ import (
"strings"
)
-var ()
-
-// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
-// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
-// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
-type ClosureBusy struct {
- bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
- desc string
-}
-
-// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
-func NewClosureBusy(desc string) *ClosureBusy {
- return &ClosureBusy{
- desc: desc,
- }
-}
-
-func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
- if c.bMap == nil {
- c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
- }
- return c.bMap.Put(config)
-}
+var (
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorTraceATNSim = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
+)
type ParserATNSimulator struct {
- BaseATNSimulator
+ *BaseATNSimulator
parser Parser
predictionMode int
input TokenStream
startIndex int
dfa *DFA
- mergeCache *JPCMap
+ mergeCache *DoubleDict
outerContext ParserRuleContext
}
-//goland:noinspection GoUnusedExportedFunction
func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
- p := &ParserATNSimulator{
- BaseATNSimulator: BaseATNSimulator{
- atn: atn,
- sharedContextCache: sharedContextCache,
- },
- }
+ p := new(ParserATNSimulator)
+
+ p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
p.parser = parser
p.decisionToDFA = decisionToDFA
@@ -66,12 +46,12 @@ func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, shared
p.outerContext = nil
p.dfa = nil
// Each prediction operation uses a cache for merge of prediction contexts.
- // Don't keep around as it wastes huge amounts of memory. [JPCMap]
- // isn't Synchronized, but we're ok since two threads shouldn't reuse same
- // parser/atn-simulator object because it can only handle one input at a time.
- // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
- // the merge if we ever see a and b again. Note that (b,a) -> c should
- // also be examined during cache lookup.
+ // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
+ // isn't Synchronized but we're ok since two threads shouldn't reuse same
+ // parser/atnsim object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
//
p.mergeCache = nil
@@ -89,14 +69,14 @@ func (p *ParserATNSimulator) SetPredictionMode(v int) {
func (p *ParserATNSimulator) reset() {
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
strconv.Itoa(input.LT(1).GetColumn()))
}
+
p.input = input
p.startIndex = input.Index()
p.outerContext = outerContext
@@ -108,15 +88,7 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre
defer func() {
p.dfa = nil
- p.mergeCache = nil // whack cache after each prediction
- // Do not attempt to run a GC now that we're done with the cache as makes the
- // GC overhead terrible for badly formed grammars and has little effect on well formed
- // grammars.
- // I have made some extra effort to try and reduce memory pressure by reusing allocations when
- // possible. However, it can only have a limited effect. The real solution is to encourage grammar
- // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
- // what is happening at runtime, along with using the error listener to report ambiguities.
-
+ p.mergeCache = nil // whack cache after each prediction
input.Seek(index)
input.Release(m)
}()
@@ -141,7 +113,7 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre
if outerContext == nil {
outerContext = ParserRuleContextEmpty
}
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
@@ -170,52 +142,47 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre
p.atn.stateMu.Unlock()
}
- alt, re := p.execATN(dfa, s0, input, index, outerContext)
- parser.SetError(re)
- if runtimeConfig.parserATNSimulatorDebug {
+ alt := p.execATN(dfa, s0, input, index, outerContext)
+ if ParserATNSimulatorDebug {
fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
}
return alt
}
-// execATN performs ATN simulation to compute a predicted alternative based
-// upon the remaining input, but also updates the DFA cache to avoid
-// having to traverse the ATN again for the same input sequence.
-//
+// Performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+
// There are some key conditions we're looking for after computing a new
// set of ATN configs (proposed DFA state):
-//
-// - If the set is empty, there is no viable alternative for current symbol
-// - Does the state uniquely predict an alternative?
-// - Does the state have a conflict that would prevent us from
-// putting it on the work list?
-//
+// if the set is empty, there is no viable alternative for current symbol
+// does the state uniquely predict an alternative?
+// does the state have a conflict that would prevent us from
+// putting it on the work list?
+
// We also have some key operations to do:
+// add an edge from previous DFA state to potentially NewDFA state, D,
+// upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// collecting predicates and adding semantic context to DFA accept states
+// adding rule context to context-sensitive DFA accept states
+// consuming an input symbol
+// Reporting a conflict
+// Reporting an ambiguity
+// Reporting a context sensitivity
+// Reporting insufficient predicates
+
+// cover these cases:
//
-// - Add an edge from previous DFA state to potentially NewDFA state, D,
-// - Upon current symbol but only if adding to work list, which means in all
-// cases except no viable alternative (and possibly non-greedy decisions?)
-// - Collecting predicates and adding semantic context to DFA accept states
-// - adding rule context to context-sensitive DFA accept states
-// - Consuming an input symbol
-// - Reporting a conflict
-// - Reporting an ambiguity
-// - Reporting a context sensitivity
-// - Reporting insufficient predicates
-//
-// Cover these cases:
-//
-// - dead end
-// - single alt
-// - single alt + predicates
-// - conflict
-// - conflict + predicates
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
-
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+// dead end
+// single alt
+// single alt + preds
+// conflict
+// conflict + preds
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
", DFA state " + s0.String() +
", LA(1)==" + p.getLookaheadName(input) +
@@ -224,7 +191,7 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
previousD := s0
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("s0 = " + s0.String())
}
t := input.LA(1)
@@ -247,17 +214,17 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
input.Seek(startIndex)
alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
if alt != ATNInvalidAltNumber {
- return alt, nil
+ return alt
}
- p.parser.SetError(e)
- return ATNInvalidAltNumber, e
+
+ panic(e)
}
if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- conflictingAlts := D.configs.conflictingAlts
+ conflictingAlts := D.configs.GetConflictingAlts()
if D.predicates != nil {
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL failover")
}
conflictIndex := input.Index()
if conflictIndex != startIndex {
@@ -265,10 +232,10 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
}
conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
if conflictingAlts.length() == 1 {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("Full LL avoided")
}
- return conflictingAlts.minValue(), nil
+ return conflictingAlts.minValue()
}
if conflictIndex != startIndex {
// restore the index so Reporting the fallback to full
@@ -276,18 +243,18 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
input.Seek(conflictIndex)
}
}
- if runtimeConfig.parserATNSimulatorDFADebug {
+ if ParserATNSimulatorDFADebug {
fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
}
fullCtx := true
s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
- alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
- return alt, re
+ alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt
}
if D.isAcceptState {
if D.predicates == nil {
- return D.prediction, nil
+ return D.prediction
}
stopIndex := input.Index()
input.Seek(startIndex)
@@ -295,13 +262,13 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
switch alts.length() {
case 0:
- return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
case 1:
- return alts.minValue(), nil
+ return alts.minValue()
default:
// Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
- return alts.minValue(), nil
+ return alts.minValue()
}
}
previousD = D
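The execATN loop shown in this hunk is the SLL pass: it extends the DFA symbol by symbol until it either uniquely predicts an alternative, hits a conflict that forces a full-context (LL) retry, or finds no viable alternative. Reduced to a hedged skeleton with invented helpers (these are not the simulator's real signatures):

package main

import "fmt"

type outcome int

const (
	unique outcome = iota
	conflict
	noViable
)

// sllStep stands in for one DFA/ATN step; the real code computes a target
// DFA state for the symbol and inspects its configuration set.
type sllStep func(symbol int) outcome

// predict mirrors the shape of the loop above: consume symbols until the
// SLL pass decides, and fall back to a full-context pass on conflict.
func predict(input []int, step sllStep, fullContext func() int) (int, error) {
	for _, sym := range input {
		switch step(sym) {
		case unique:
			return 1, nil // uniquely predicted alternative (illustrative value)
		case conflict:
			return fullContext(), nil // retry with full LL context
		case noViable:
			return 0, fmt.Errorf("no viable alternative at %d", sym)
		}
	}
	return 0, fmt.Errorf("ran out of input")
}

func main() {
	step := func(int) outcome { return conflict }
	alt, err := predict([]int{7}, step, func() int { return 2 })
	fmt.Println(alt, err) // 2 <nil> — resolved by the full-context fallback
}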
@@ -347,8 +314,7 @@ func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int)
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, p method
// returns {@link //ERROR}.
-//
-//goland:noinspection GoBoolExpressions
+
func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
reach := p.computeReachSet(previousD.configs, t, false)
@@ -356,12 +322,12 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t
p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
return ATNSimulatorError
}
- // create new target state we'll add to DFA after it's complete
+ // create a new target state we'll add to DFA after it's complete
D := NewDFAState(-1, reach)
predictedAlt := p.getUniqueAlt(reach)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
altSubSets := PredictionModegetConflictingAltSubsets(reach)
fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
", previous=" + previousD.configs.String() +
@@ -374,17 +340,17 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t
if predictedAlt != ATNInvalidAltNumber {
// NO CONFLICT, UNIQUELY PREDICTED ALT
D.isAcceptState = true
- D.configs.uniqueAlt = predictedAlt
+ D.configs.SetUniqueAlt(predictedAlt)
D.setPrediction(predictedAlt)
} else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
// MORE THAN ONE VIABLE ALTERNATIVE
- D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
D.requiresFullContext = true
// in SLL-only mode, we will stop at p state and return the minimum alt
D.isAcceptState = true
- D.setPrediction(D.configs.conflictingAlts.minValue())
+ D.setPrediction(D.configs.GetConflictingAlts().minValue())
}
- if D.isAcceptState && D.configs.hasSemanticContext {
+ if D.isAcceptState && D.configs.HasSemanticContext() {
p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
if D.predicates != nil {
D.setPrediction(ATNInvalidAltNumber)
@@ -415,17 +381,15 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState
}
// comes back with reach.uniqueAlt set to a valid alt
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATNWithFullContext " + s0.String())
}
fullCtx := true
foundExactAmbig := false
- var reach *ATNConfigSet
+ var reach ATNConfigSet
previous := s0
input.Seek(startIndex)
t := input.LA(1)
@@ -443,23 +407,25 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
// ATN states in SLL implies LL will also get nowhere.
// If conflict in states that dip out, choose min since we
// will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previous, startIndex)
input.Seek(startIndex)
alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
if alt != ATNInvalidAltNumber {
- return alt, nil
+ return alt
}
- return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+
+ panic(e)
}
altSubSets := PredictionModegetConflictingAltSubsets(reach)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
}
- reach.uniqueAlt = p.getUniqueAlt(reach)
+ reach.SetUniqueAlt(p.getUniqueAlt(reach))
// unique prediction?
- if reach.uniqueAlt != ATNInvalidAltNumber {
- predictedAlt = reach.uniqueAlt
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ predictedAlt = reach.GetUniqueAlt()
break
}
if p.predictionMode != PredictionModeLLExactAmbigDetection {
@@ -488,9 +454,9 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
// If the configuration set uniquely predicts an alternative,
// without conflict, then we know that it's a full LL decision
// not SLL.
- if reach.uniqueAlt != ATNInvalidAltNumber {
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
- return predictedAlt, nil
+ return predictedAlt
}
// We do not check predicates here because we have checked them
// on-the-fly when doing full context prediction.
@@ -503,10 +469,10 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
//
// For example, we might know that we have conflicting configurations.
// But, that does not mean that there is no way forward without a
- // conflict. It's possible to have non-conflicting alt subsets as in:
- //
+ // conflict. It's possible to have nonconflicting alt subsets as in:
+
// altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
- //
+
// from
//
// [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
@@ -521,15 +487,14 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
- return predictedAlt, nil
+ return predictedAlt
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
if p.mergeCache == nil {
- p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ p.mergeCache = NewDoubleDict()
}
- intermediate := NewATNConfigSet(fullCtx)
+ intermediate := NewBaseATNConfigSet(fullCtx)
// Configurations already in a rule stop state indicate reaching the end
// of the decision rule (local context) or end of the start rule (full
@@ -541,18 +506,18 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
// ensure that the alternative Matching the longest overall sequence is
// chosen when multiple such configurations can Match the input.
- var skippedStopStates []*ATNConfig
+ var skippedStopStates []*BaseATNConfig
// First figure out where we can reach on input t
- for _, c := range closure.configs {
- if runtimeConfig.parserATNSimulatorDebug {
+ for _, c := range closure.GetItems() {
+ if ParserATNSimulatorDebug {
fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
}
if _, ok := c.GetState().(*RuleStopState); ok {
if fullCtx || t == TokenEOF {
- skippedStopStates = append(skippedStopStates, c)
- if runtimeConfig.parserATNSimulatorDebug {
+ skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
+ if ParserATNSimulatorDebug {
fmt.Println("added " + c.String() + " to SkippedStopStates")
}
}
@@ -562,9 +527,9 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
for _, trans := range c.GetState().GetTransitions() {
target := p.getReachableTarget(trans, t)
if target != nil {
- cfg := NewATNConfig4(c, target)
+ cfg := NewBaseATNConfig4(c, target)
intermediate.Add(cfg, p.mergeCache)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("added " + cfg.String() + " to intermediate")
}
}
@@ -572,7 +537,7 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
}
// Now figure out where the reach operation can take us...
- var reach *ATNConfigSet
+ var reach ATNConfigSet
// This block optimizes the reach operation for intermediate sets which
// trivially indicate a termination state for the overall
@@ -600,8 +565,8 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
// operation on the intermediate set to compute its initial value.
//
if reach == nil {
- reach = NewATNConfigSet(fullCtx)
- closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ reach = NewBaseATNConfigSet(fullCtx)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
treatEOFAsEpsilon := t == TokenEOF
amount := len(intermediate.configs)
for k := 0; k < amount; k++ {
@@ -623,10 +588,10 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
//
// This is handled before the configurations in SkippedStopStates,
// because any configurations potentially added from that list are
- // already guaranteed to meet this condition whether it's
+ // already guaranteed to meet this condition whether or not it's
// required.
//
- reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
}
// If SkippedStopStates!=nil, then it contains at least one
// configuration. For full-context reach operations, these
@@ -642,40 +607,41 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
}
}
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
}
- if len(reach.configs) == 0 {
+ if len(reach.GetItems()) == 0 {
return nil
}
return reach
}
-// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
-// configs which are in a [RuleStopState]. If all
-// configurations in configs are already in a rule stop state, this
-// method simply returns configs.
+// Return a configuration set containing only the configurations from
+// {@code configs} which are in a {@link RuleStopState}. If all
+// configurations in {@code configs} are already in a rule stop state, this
+// method simply returns {@code configs}.
//
-// When lookToEndOfRule is true, this method uses
-// [ATN].[NextTokens] for each configuration in configs which is
+// When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
+// from the configuration via epsilon-only transitions.
//
-// When lookToEndOfRule is true, this method checks for rule stop states
+// @param configs the configuration set to update
+// @param lookToEndOfRule when true, this method checks for rule stop states
// reachable by epsilon-only transitions from each configuration in
-// configs.
+// {@code configs}.
//
-// The func returns configs if all configurations in configs are in a
-// rule stop state, otherwise it returns a new configuration set containing only
-// the configurations from configs which are in a rule stop state
-func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+// @return {@code configs} if all configurations in {@code configs} are in a
+// rule stop state, otherwise return a new configuration set containing only
+// the configurations from {@code configs} which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
if PredictionModeallConfigsInRuleStopStates(configs) {
return configs
}
- result := NewATNConfigSet(configs.fullCtx)
- for _, config := range configs.configs {
+ result := NewBaseATNConfigSet(configs.FullContext())
+ for _, config := range configs.GetItems() {
if _, ok := config.GetState().(*RuleStopState); ok {
result.Add(config, p.mergeCache)
continue
@@ -684,81 +650,91 @@ func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConf
NextTokens := p.atn.NextTokens(config.GetState(), nil)
if NextTokens.contains(TokenEpsilon) {
endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
- result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
}
}
}
return result
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
// always at least the implicit call to start rule
initialContext := predictionContextFromRuleContext(p.atn, ctx)
- configs := NewATNConfigSet(fullCtx)
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ configs := NewBaseATNConfigSet(fullCtx)
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("computeStartState from ATN state " + a.String() +
" initialContext=" + initialContext.String())
}
for i := 0; i < len(a.GetTransitions()); i++ {
target := a.GetTransitions()[i].getTarget()
- c := NewATNConfig6(target, i+1, initialContext)
- closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ c := NewBaseATNConfig6(target, i+1, initialContext)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
p.closure(c, configs, closureBusy, true, fullCtx, false)
}
return configs
}
-// applyPrecedenceFilter transforms the start state computed by
-// [computeStartState] to the special start state used by a
-// precedence [DFA] for a particular precedence value. The transformation
+// This method transforms the start state computed by
+// {@link //computeStartState} to the special start state used by a
+// precedence DFA for a particular precedence value. The transformation
// process applies the following changes to the start state's configuration
// set.
//
-// 1. Evaluate the precedence predicates for each configuration using
-// [SemanticContext].evalPrecedence.
-// 2. Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context.
-//
-// Transformation 2 is valid for the following reasons:
-//
-// - The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-// - The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
-// The prediction context must be considered by this filter to address
-// situations like the following:
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
+//
+// - Evaluate the precedence predicates for each configuration using
+// {@link SemanticContext//evalPrecedence}.
+// - Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context. This transformation is
+// valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+//
+//
//
-// In the above grammar, the [ATN] state immediately before the token
-// reference 'a' in letterA is reachable from the left edge
+//
+// The prediction context must be considered by this filter to address
+// situations like the following.
+//
+//
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+//
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
// of both the primary and closure blocks of the left-recursive rule
-// statement. The prediction context associated with each of these
+// {@code statement}. The prediction context associated with each of these
// configurations distinguishes between them, and prevents the alternative
-// which stepped out to prog, and then back in to statement
+// which stepped out to {@code prog} (and then back in to {@code statement})
// from being eliminated by the filter.
+//
//
-// The func returns the transformed configuration set representing the start state
-// for a precedence [DFA] at a particular precedence level (determined by
-// calling [Parser].getPrecedence).
-func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+// @param configs The configuration set computed by
+// {@link //computeStartState} as the start state for the DFA.
+// @return The transformed configuration set representing the start state
+// for a precedence DFA at a particular precedence level (determined by
+// calling {@link Parser//getPrecedence}).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
- statesFromAlt1 := make(map[int]*PredictionContext)
- configSet := NewATNConfigSet(configs.fullCtx)
+ statesFromAlt1 := make(map[int]PredictionContext)
+ configSet := NewBaseATNConfigSet(configs.FullContext())
- for _, config := range configs.configs {
+ for _, config := range configs.GetItems() {
// handle alt 1 first
if config.GetAlt() != 1 {
continue
@@ -770,12 +746,12 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNCo
}
statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
if updatedContext != config.GetSemanticContext() {
- configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
} else {
configSet.Add(config, p.mergeCache)
}
}
- for _, config := range configs.configs {
+ for _, config := range configs.GetItems() {
if config.GetAlt() == 1 {
// already handled
@@ -804,11 +780,10 @@ func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATN
return nil
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
altToPred := make([]SemanticContext, nalts+1)
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
if ambigAlts.contains(c.GetAlt()) {
altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
}
@@ -822,11 +797,11 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *AT
nPredAlts++
}
}
- // unambiguous alts are nil in altToPred
+ // nonambig alts are nil in altToPred
if nPredAlts == 0 {
altToPred = nil
}
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
}
return altToPred
@@ -837,7 +812,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
containsPredicate := false
for i := 1; i < len(altToPred); i++ {
pred := altToPred[i]
- // un-predicated is indicated by SemanticContextNONE
+ // unpredicated is indicated by SemanticContextNONE
if ambigAlts != nil && ambigAlts.contains(i) {
pairs = append(pairs, NewPredPrediction(pred, i))
}
@@ -851,42 +826,51 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
return pairs
}
-// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
-// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the
-// Error state was reached during [ATN] simulation.
+// This method is used to improve the localization of error messages by
+// choosing an alternative rather than panicking a
+// {@link NoViableAltException} in particular prediction scenarios where the
+// {@link //ERROR} state was reached during ATN simulation.
//
-// The default implementation of this method uses the following
-// algorithm to identify an [ATN] configuration which successfully parsed the
+//
+// The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
// decision entry rule. Choosing such an alternative ensures that the
-// [ParserRuleContext] returned by the calling rule will be complete
+// {@link ParserRuleContext} returned by the calling rule will be complete
// and valid, and the syntax error will be Reported later at a more
-// localized location.
+// localized location.
//
-// - If a syntactically valid path or paths reach the end of the decision rule, and
-// they are semantically valid if predicated, return the min associated alt.
-// - Else, if a semantically invalid but syntactically valid path exist
-// or paths exist, return the minimum associated alt.
-// - Otherwise, return [ATNInvalidAltNumber].
+//
+// - If a syntactically valid path or paths reach the end of the decision rule and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if a semantically invalid but syntactically valid path exist
+// or paths exist, return the minimum associated alt.
+//
+// - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//
//
+//
// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a [FailedPredicateException] in
-// the parser. Specifically, this could occur if the only configuration
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting
-// [FailedPredicateException] in the parser will identify the specific
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
// predicate which is preventing the parser from successfully parsing the
// decision rule, which helps developers identify and correct logic errors
// in semantic predicates.
+//
//
-// pass in the configs holding ATN configurations which were valid immediately before
-// the ERROR state was reached, outerContext as the initial parser context from the paper
+// @param configs The ATN configurations which were valid immediately before
+// the {@link //ERROR} state was reached
+// @param outerContext The \gamma_0 initial parser context from the paper
// or the parser stack at the instant before prediction commences.
//
-// Teh func returns the value to return from [AdaptivePredict], or
-// [ATNInvalidAltNumber] if a suitable alternative was not
-// identified and [AdaptivePredict] should report an error instead.
-func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+// @return The value to return from {@link //AdaptivePredict}, or
+// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
+// identified and {@link //AdaptivePredict} should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
semValidConfigs := cfgs[0]
semInvalidConfigs := cfgs[1]
@@ -895,7 +879,7 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry
return alt
}
// Is there a syntactically valid path with a failed pred?
- if len(semInvalidConfigs.configs) > 0 {
+ if len(semInvalidConfigs.GetItems()) > 0 {
alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
if alt != ATNInvalidAltNumber { // syntactically viable path exists
return alt
@@ -904,10 +888,10 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry
return ATNInvalidAltNumber
}
-func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
alts := NewIntervalSet()
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
_, ok := c.GetState().(*RuleStopState)
if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
@@ -931,14 +915,14 @@ func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNCon
// prediction, which is where predicates need to evaluate.
type ATNConfigSetPair struct {
- item0, item1 *ATNConfigSet
+ item0, item1 ATNConfigSet
}
-func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
- succeeded := NewATNConfigSet(configs.fullCtx)
- failed := NewATNConfigSet(configs.fullCtx)
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
+ succeeded := NewBaseATNConfigSet(configs.FullContext())
+ failed := NewBaseATNConfigSet(configs.FullContext())
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
if c.GetSemanticContext() != SemanticContextNone {
predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
if predicateEvaluationResult {
@@ -950,16 +934,15 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfig
succeeded.Add(c, nil)
}
}
- return []*ATNConfigSet{succeeded, failed}
+ return []ATNConfigSet{succeeded, failed}
}
-// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
-// un-predicated runtimeConfig which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
+// Look through a list of predicate/alt pairs, returning alts for the pairs that win.
//
-//goland:noinspection GoBoolExpressions
+// A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
predictions := NewBitSet()
for i := 0; i < len(predPredictions); i++ {
@@ -973,11 +956,11 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
}
predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
}
if predicateEvaluationResult {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
}
predictions.add(pair.alt)
@@ -989,82 +972,19 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
return predictions
}
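A small sketch of the input shape evalSemanticContext works over, per the comment above: a NONE (unpredicated) predicate behaves as "always true" for its alt. Illustrative only; a real call also needs a parser and outer context for evaluate().

func examplePredPairs() []*PredPrediction {
	return []*PredPrediction{
		NewPredPrediction(SemanticContextNone, 1), // unpredicated alt 1 always "wins"
	}
}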
-func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
initialDepth := 0
p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
fullCtx, initialDepth, treatEOFAsEpsilon)
}
-func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("closure(" + config.String() + ")")
- }
-
- var stack []*ATNConfig
- visited := make(map[*ATNConfig]bool)
-
- stack = append(stack, config)
-
- for len(stack) > 0 {
- currConfig := stack[len(stack)-1]
- stack = stack[:len(stack)-1]
-
- if _, ok := visited[currConfig]; ok {
- continue
- }
- visited[currConfig] = true
-
- if _, ok := currConfig.GetState().(*RuleStopState); ok {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if !currConfig.GetContext().isEmpty() {
- for i := 0; i < currConfig.GetContext().length(); i++ {
- if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
- if fullCtx {
- nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
- configs.Add(nb, p.mergeCache)
- continue
- } else {
- // we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
- }
- p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
- continue
- }
- returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
- newContext := currConfig.GetContext().GetParent(i) // "pop" return state
-
- c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
- // While we have context to pop back from, we may have
- // gotten that context AFTER having falling off a rule.
- // Make sure we track that we are now out of context.
- c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
-
- stack = append(stack, c)
- }
- continue
- } else if fullCtx {
- // reached end of start rule
- configs.Add(currConfig, p.mergeCache)
- continue
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
- }
- }
+ //fmt.Println("configs(" + configs.String() + ")")
+ if config.GetReachesIntoOuterContext() > 50 {
+ panic("problem")
}
-
- p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
-}
-
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("closure(" + config.String() + ")")
}
if _, ok := config.GetState().(*RuleStopState); ok {
@@ -1074,12 +994,11 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
for i := 0; i < config.GetContext().length(); i++ {
if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
if fullCtx {
- nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
- configs.Add(nb, p.mergeCache)
+ configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
continue
} else {
// we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
}
p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
@@ -1089,7 +1008,7 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
returnState := p.atn.states[config.GetContext().getReturnState(i)]
newContext := config.GetContext().GetParent(i) // "pop" return state
- c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
// While we have context to pop back from, we may have
// gotten that context AFTER having falling off a rule.
// Make sure we track that we are now out of context.
@@ -1103,7 +1022,7 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
return
} else {
// else if we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
}
}
@@ -1111,10 +1030,8 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
}
-// Do the actual work of walking epsilon edges
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+// Do the actual work of walking epsilon edges
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
state := config.GetState()
// optimization
if !state.GetEpsilonOnlyTransitions() {
@@ -1131,7 +1048,7 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
_, ok := t.(*ActionTransition)
continueCollecting := collectPredicates && !ok
c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
- if c != nil {
+ if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
newDepth := depth
if _, ok := config.GetState().(*RuleStopState); ok {
@@ -1139,7 +1056,7 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
// We can't get here if incoming config was rule stop and we had context
// track how far we dip into outer context. Might
// come in handy and we avoid evaluating context dependent
- // preds if this is > 0.
+ // preds if this is > 0.
if p.dfa != nil && p.dfa.getPrecedenceDfa() {
if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
@@ -1155,9 +1072,9 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
continue
}
- configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of this method
newDepth--
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("dips into outer ctx: " + c.String())
}
} else {
@@ -1181,9 +1098,8 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
}
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
- if !runtimeConfig.lRLoopEntryBranchOpt {
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
+ if TurnOffLRLoopEntryBranchOpt {
return false
}
@@ -1280,7 +1196,7 @@ func (p *ParserATNSimulator) getRuleName(index int) string {
return sb.String()
}
-func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
switch t.getSerializationType() {
case TransitionRULE:
@@ -1292,13 +1208,13 @@ func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, c
case TransitionACTION:
return p.actionTransition(config, t.(*ActionTransition))
case TransitionEPSILON:
- return NewATNConfig4(config, t.getTarget())
+ return NewBaseATNConfig4(config, t.getTarget())
case TransitionATOM, TransitionRANGE, TransitionSET:
// EOF transitions act like epsilon transitions after the first EOF
// transition is traversed
if treatEOFAsEpsilon {
if t.Matches(TokenEOF, 0, 1) {
- return NewATNConfig4(config, t.getTarget())
+ return NewBaseATNConfig4(config, t.getTarget())
}
}
return nil
@@ -1307,63 +1223,60 @@ func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, c
}
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
}
- return NewATNConfig4(config, t.getTarget())
+ return NewBaseATNConfig4(config, t.getTarget())
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
- pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
if p.parser != nil {
fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
}
}
- var c *ATNConfig
+ var c *BaseATNConfig
if collectPredicates && inContext {
if fullCtx {
// In full context mode, we can evaluate predicates on-the-fly
// during closure, which dramatically reduces the size of
- // the runtimeConfig sets. It also obviates the need to test predicates
+ // the config sets. It also obviates the need to test predicates
// later during conflict resolution.
currentPosition := p.input.Index()
p.input.Seek(p.startIndex)
predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
p.input.Seek(currentPosition)
if predSucceeds {
- c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
}
} else {
newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
}
} else {
- c = NewATNConfig4(config, pt.getTarget())
+ c = NewBaseATNConfig4(config, pt.getTarget())
}
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("runtimeConfig from pred transition=" + c.String())
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
}
return c
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
if p.parser != nil {
fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
}
}
- var c *ATNConfig
+ var c *BaseATNConfig
if collectPredicates && (!pt.isCtxDependent || inContext) {
if fullCtx {
// In full context mode, we can evaluate predicates on-the-fly
@@ -1375,92 +1288,78 @@ func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTran
predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
p.input.Seek(currentPosition)
if predSucceeds {
- c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
}
} else {
newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
}
} else {
- c = NewATNConfig4(config, pt.getTarget())
+ c = NewBaseATNConfig4(config, pt.getTarget())
}
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("config from pred transition=" + c.String())
}
return c
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
}
returnState := t.followState
newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
- return NewATNConfig1(config, t.getTarget(), newContext)
+ return NewBaseATNConfig1(config, t.getTarget(), newContext)
}
-func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
altsets := PredictionModegetConflictingAltSubsets(configs)
return PredictionModeGetAlts(altsets)
}
-// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// Sam pointed out a problem with the previous definition, v3, of
// ambiguous states. If we have another state associated with conflicting
// alternatives, we should keep going. For example, the following grammar
//
-// s : (ID | ID ID?) ;
-//
-// When the [ATN] simulation reaches the state before ;, it has a [DFA]
-// state that looks like:
-//
-// [12|1|[], 6|2|[], 12|2|[]].
-//
-// Naturally
-//
-// 12|1|[] and 12|2|[]
-//
-// conflict, but we cannot stop processing this node
-// because alternative to has another way to continue, via
-//
-// [6|2|[]].
+// s : (ID | ID ID?) ';'
+//
+// When the ATN simulation reaches the state before ';', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via [6|2|[]].
// The key is that we have a single state that has config's only associated
// with a single alternative, 2, and crucially the state transitions
// among the configurations are all non-epsilon transitions. That means
// we don't consider any conflicts that include alternative 2. So, we
// ignore the conflict between alts 1 and 2. We ignore a set of
// conflicting alts when there is an intersection with an alternative
-// associated with a single alt state in the state config-list map.
+// associated with a single alt state in the state→config-list map.
//
// It's also the case that we might have two conflicting configurations but
-// also a 3rd non-conflicting configuration for a different alternative:
-//
-// [1|1|[], 1|2|[], 8|3|[]].
-//
-// This can come about from grammar:
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
//
-// a : A | A | A B
+// a : A | A | A B
//
// After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue, so we do not
-// stop working on this state.
-//
-// In the previous example, we're concerned
+// However, alternative 3 will be able to continue and so we do not
+// stop working on this state. In the previous example, we're concerned
// with states associated with the conflicting alternatives. Here alt
// 3 is not associated with the conflicting configs, but since we can continue
// looking for input reasonably, I don't declare the state done. We
// ignore a set of conflicting alts when we have an alternative
// that we still need to pursue.
-func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+//
+
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
var conflictingAlts *BitSet
- if configs.uniqueAlt != ATNInvalidAltNumber {
+ if configs.GetUniqueAlt() != ATNInvalidAltNumber {
conflictingAlts = NewBitSet()
- conflictingAlts.add(configs.uniqueAlt)
+ conflictingAlts.add(configs.GetUniqueAlt())
} else {
- conflictingAlts = configs.conflictingAlts
+ conflictingAlts = configs.GetConflictingAlts()
}
return conflictingAlts
}
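For orientation, a sketch of how the returned *BitSet is consumed elsewhere in this file (getPredsForAmbigAlts checks ambigAlts.contains(alt); ReportAmbiguity prints it). The wrapper function is illustrative only.

func exampleConflictingAlts() {
	alts := NewBitSet()
	alts.add(1)
	alts.add(2)
	fmt.Println(alts.contains(2)) // true
	fmt.Println(alts.String())    // rendered the same way ReportAmbiguity does below
}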
@@ -1485,10 +1384,11 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
return p.GetTokenName(input.LA(1))
}
-// Used for debugging in [AdaptivePredict] around [execATN], but I cut
-// it out for clarity now that alg. works well. We can leave this
-// "dead" code for a bit.
-func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+// Used for debugging in AdaptivePredict around execATN, but I cut
+// it out for clarity now that the algorithm works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
panic("Not implemented")
@@ -1518,13 +1418,13 @@ func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
// }
}
-func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
}
-func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
alt := ATNInvalidAltNumber
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
if alt == ATNInvalidAltNumber {
alt = c.GetAlt() // found first alt
} else if c.GetAlt() != alt {
@@ -1552,10 +1452,8 @@ func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
// @return If {@code to} is {@code nil}, p method returns {@code nil}
// otherwise p method returns the result of calling {@link //addDFAState}
// on {@code to}
-//
-//goland:noinspection GoBoolExpressions
func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
}
if to == nil {
@@ -1574,7 +1472,7 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
from.setIthEdge(t+1, to) // connect
p.atn.edgeMu.Unlock()
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
var names []string
if p.parser != nil {
names = p.parser.GetLiteralNames()
@@ -1585,49 +1483,48 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
return to
}
-// addDFAState adds state D to the [DFA] if it is not already present, and returns
-// the actual instance stored in the [DFA]. If a state equivalent to D
-// is already in the [DFA], the existing state is returned. Otherwise, this
-// method returns D after adding it to the [DFA].
+// Add state {@code D} to the DFA if it is not already present, and return
+// the actual instance stored in the DFA. If a state equivalent to {@code D}
+// is already in the DFA, the existing state is returned. Otherwise this
+// method returns {@code D} after adding it to the DFA.
//
-// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
-// does not change the DFA.
+// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
+// does not change the DFA.
//
-//goland:noinspection GoBoolExpressions
+// @param dfa The dfa
+// @param D The DFA state to add
+// @return The state stored in the DFA. This will be either the existing
+// state if {@code D} is already in the DFA, or {@code D} itself if the
+// state was not already present.
func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
if d == ATNSimulatorError {
return d
}
-
- existing, present := dfa.Get(d)
+ existing, present := dfa.states.Get(d)
if present {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+ if ParserATNSimulatorTraceATNSim {
fmt.Print("addDFAState " + d.String() + " exists")
}
return existing
}
- // The state will be added if not already there or we will be given back the existing state struct
- // if it is present.
+ // The state was not present, so update it with configs
//
- d.stateNumber = dfa.Len()
- if !d.configs.readOnly {
- d.configs.OptimizeConfigs(&p.BaseATNSimulator)
- d.configs.readOnly = true
- d.configs.configLookup = nil
+ d.stateNumber = dfa.states.Len()
+ if !d.configs.ReadOnly() {
+ d.configs.OptimizeConfigs(p.BaseATNSimulator)
+ d.configs.SetReadOnly(true)
}
- dfa.Put(d)
-
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+ dfa.states.Put(d)
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("addDFAState new " + d.String())
}
return d
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
interval := NewInterval(startIndex, stopIndex+1)
fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
@@ -1637,9 +1534,8 @@ func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAl
}
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
interval := NewInterval(startIndex, stopIndex+1)
fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
@@ -1649,15 +1545,10 @@ func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int,
}
}
-// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route.
-//
-// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer
-// so that they can see that this is happening and can take action if they want to.
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
- exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+// If context-sensitive parsing, we know it's an ambiguity, not a conflict.
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
interval := NewInterval(startIndex, stopIndex+1)
fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
similarity index 77%
rename from vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
index c249bc138..1c8cee747 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
@@ -31,9 +31,7 @@ type ParserRuleContext interface {
}
type BaseParserRuleContext struct {
- parentCtx RuleContext
- invokingState int
- RuleIndex int
+ *BaseRuleContext
start, stop Token
exception RecognitionException
@@ -42,22 +40,8 @@ type BaseParserRuleContext struct {
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
prc := new(BaseParserRuleContext)
- InitBaseParserRuleContext(prc, parent, invokingStateNumber)
- return prc
-}
-
-func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
- // What context invoked b rule?
- prc.parentCtx = parent
- // What state invoked the rule associated with b context?
- // The "return address" is the followState of invokingState
- // If parent is nil, b should be -1.
- if parent == nil {
- prc.invokingState = -1
- } else {
- prc.invokingState = invokingStateNumber
- }
+ prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
prc.RuleIndex = -1
// * If we are debugging or building a parse tree for a Visitor,
@@ -72,6 +56,8 @@ func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleCont
// The exception that forced prc rule to return. If the rule successfully
// completed, prc is {@code nil}.
prc.exception = nil
+
+ return prc
}
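With the embedded *BaseRuleContext restored above, the parent and invoking-state accessors are promoted from the embedded struct rather than defined on BaseParserRuleContext itself (which is why they are deleted further down). A hedged sketch, assuming NewBaseRuleContext keeps the -1-for-nil-parent rule shown in the removed InitBaseParserRuleContext:

func exampleRuleContext() bool {
	prc := NewBaseParserRuleContext(nil, 0)
	// invokingState is assumed to be -1 for a nil parent; RuleIndex is set to -1
	// explicitly in the constructor above.
	return prc.GetInvokingState() == -1 && prc.RuleIndex == -1
}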
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
@@ -104,15 +90,14 @@ func (prc *BaseParserRuleContext) GetText() string {
return s
}
-// EnterRule is called when any rule is entered.
-func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
+// Double dispatch methods for listeners
+func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}
-// ExitRule is called when any rule is exited.
-func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
+func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}
-// * Does not set parent link other add methods do that
+// * Does not set parent link; other add methods do that
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if prc.children == nil {
prc.children = make([]Tree, 0)
@@ -135,9 +120,10 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
return child
}
-// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
-// we entered a rule. If we have a label, we will need to remove
-// the generic ruleContext object.
+// * Used by EnterOuterAlt to toss out a RuleContext previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
+// /
func (prc *BaseParserRuleContext) RemoveLastChild() {
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
@@ -307,7 +293,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int {
return len(prc.children)
}
-func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
+func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
if prc.start == nil || prc.stop == nil {
return TreeInvalidInterval
}
@@ -354,50 +340,6 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
return s
}
-func (prc *BaseParserRuleContext) SetParent(v Tree) {
- if v == nil {
- prc.parentCtx = nil
- } else {
- prc.parentCtx = v.(RuleContext)
- }
-}
-
-func (prc *BaseParserRuleContext) GetInvokingState() int {
- return prc.invokingState
-}
-
-func (prc *BaseParserRuleContext) SetInvokingState(t int) {
- prc.invokingState = t
-}
-
-func (prc *BaseParserRuleContext) GetRuleIndex() int {
- return prc.RuleIndex
-}
-
-func (prc *BaseParserRuleContext) GetAltNumber() int {
- return ATNInvalidAltNumber
-}
-
-func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
-
-// IsEmpty returns true if the context of b is empty.
-//
-// A context is empty if there is no invoking state, meaning nobody calls
-// current context.
-func (prc *BaseParserRuleContext) IsEmpty() bool {
- return prc.invokingState == -1
-}
-
-// GetParent returns the combined text of all child nodes. This method only considers
-// tokens which have been added to the parse tree.
-//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not
-// added to the parse trees, they will not appear in the output of this
-// method.
-func (prc *BaseParserRuleContext) GetParent() Tree {
- return prc.parentCtx
-}
-
var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
@@ -408,7 +350,6 @@ type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
-//goland:noinspection GoUnusedExportedFunction
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
prc := new(BaseInterpreterRuleContext)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
new file mode 100644
index 000000000..ba62af361
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
@@ -0,0 +1,806 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+// Represents {@code $} in local context prediction, which means wildcard.
+// {@code *+x = *}.
+// /
+const (
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// Represents {@code $} in an array in full context mode, when {@code $}
+// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+// {@code $} = {@link //EmptyReturnState}.
+// /
+
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+type PredictionContext interface {
+ Hash() int
+ Equals(interface{}) bool
+ GetParent(int) PredictionContext
+ getReturnState(int) int
+ length() int
+ isEmpty() bool
+ hasEmptyPath() bool
+ String() string
+}
+
+type BasePredictionContext struct {
+ cachedHash int
+}
+
+func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
+ pc := new(BasePredictionContext)
+ pc.cachedHash = cachedHash
+
+ return pc
+}
+
+func (b *BasePredictionContext) isEmpty() bool {
+ return false
+}
+
+func calculateHash(parent PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
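A quick sketch of why the cached hashes are safe to compare: calculateHash is a pure murmur fold over (parent.Hash(), returnState), so equal inputs always hash the same, which is what lets ArrayPredictionContext.Equals further down reject on a hash mismatch first. The wrapper is illustrative only.

func exampleHashStable() bool {
	h1 := calculateHash(BasePredictionContextEMPTY, 7)
	h2 := calculateHash(BasePredictionContextEMPTY, 7)
	return h1 == h2 // deterministic: same parent hash and return state
}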
+
+// Used to cache {@link BasePredictionContext} objects. It's used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+
+type PredictionContextCache struct {
+ cache map[PredictionContext]PredictionContext
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ t := new(PredictionContextCache)
+ t.cache = make(map[PredictionContext]PredictionContext)
+ return t
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect shared cache from unsafe thread access.
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
+ if ctx == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY
+ }
+ existing := p.cache[ctx]
+ if existing != nil {
+ return existing
+ }
+ p.cache[ctx] = ctx
+ return ctx
+}
+
+func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
+ return p.cache[ctx]
+}
+
+func (p *PredictionContextCache) length() int {
+ return len(p.cache)
+}
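A sketch of the intended use of the cache described above: add returns the stored instance, so repeated adds of the same context hand back one canonical object for DFA states to share. Names are from this file; the wrapper is illustrative.

func exampleCacheAdd() bool {
	cache := NewPredictionContextCache()
	ctx := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
	first := cache.add(ctx)  // stored and returned
	second := cache.add(ctx) // the cached instance comes back
	return first == second   // true
}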
+
+type SingletonPredictionContext interface {
+ PredictionContext
+}
+
+type BaseSingletonPredictionContext struct {
+ *BasePredictionContext
+
+ parentCtx PredictionContext
+ returnState int
+}
+
+func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
+ var cachedHash int
+ if parent != nil {
+ cachedHash = calculateHash(parent, returnState)
+ } else {
+ cachedHash = calculateEmptyHash()
+ }
+
+ s := new(BaseSingletonPredictionContext)
+ s.BasePredictionContext = NewBasePredictionContext(cachedHash)
+
+ s.parentCtx = parent
+ s.returnState = returnState
+
+ return s
+}
+
+func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func (b *BaseSingletonPredictionContext) length() int {
+ return 1
+}
+
+func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
+ return b.parentCtx
+}
+
+func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
+ return b.returnState
+}
+
+func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
+ return b.returnState == BasePredictionContextEmptyReturnState
+}
+
+func (b *BaseSingletonPredictionContext) Hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
+ if b == other {
+ return true
+ }
+ if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ return false
+ }
+
+ otherP := other.(*BaseSingletonPredictionContext)
+
+ if b.returnState != otherP.getReturnState(0) {
+ return false
+ }
+ if b.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return b.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (b *BaseSingletonPredictionContext) String() string {
+ var up string
+
+ if b.parentCtx == nil {
+ up = ""
+ } else {
+ up = b.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if b.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(b.returnState)
+ }
+
+ return strconv.Itoa(b.returnState) + " " + up
+}
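How a singleton chain renders with the String method above, given that the empty context prints "$" (see EmptyPredictionContext.String just below): a two-frame chain over the empty root comes out as "12 7 $". Illustrative only.

func exampleContextString() string {
	p7 := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
	p12 := SingletonBasePredictionContextCreate(p7, 12)
	return p12.String() // "12 7 $"
}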
+
+var BasePredictionContextEMPTY = NewEmptyPredictionContext()
+
+type EmptyPredictionContext struct {
+ *BaseSingletonPredictionContext
+}
+
+func NewEmptyPredictionContext() *EmptyPredictionContext {
+
+ p := new(EmptyPredictionContext)
+
+ p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
+ p.cachedHash = calculateEmptyHash()
+ return p
+}
+
+func (e *EmptyPredictionContext) isEmpty() bool {
+ return true
+}
+
+func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
+ return nil
+}
+
+func (e *EmptyPredictionContext) getReturnState(index int) int {
+ return e.returnState
+}
+
+func (e *EmptyPredictionContext) Hash() int {
+ return e.cachedHash
+}
+
+func (e *EmptyPredictionContext) Equals(other interface{}) bool {
+ return e == other
+}
+
+func (e *EmptyPredictionContext) String() string {
+ return "$"
+}
+
+type ArrayPredictionContext struct {
+ *BasePredictionContext
+
+ parents []PredictionContext
+ returnStates []int
+}
+
+func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array
+ // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+ // nil parent and
+ // returnState == {@link //EmptyReturnState}.
+ hash := murmurInit(1)
+
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ c := new(ArrayPredictionContext)
+ c.BasePredictionContext = NewBasePredictionContext(hash)
+
+ c.parents = parents
+ c.returnStates = returnStates
+
+ return c
+}
+
+func (a *ArrayPredictionContext) GetReturnStates() []int {
+ return a.returnStates
+}
+
+func (a *ArrayPredictionContext) hasEmptyPath() bool {
+ return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) isEmpty() bool {
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return a.returnStates[0] == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) length() int {
+ return len(a.returnStates)
+}
+
+func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
+ return a.parents[index]
+}
+
+func (a *ArrayPredictionContext) getReturnState(index int) int {
+ return a.returnStates[index]
+}
+
+// Equals is the default comparison function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Equals(o interface{}) bool {
+ if a == o {
+ return true
+ }
+ other, ok := o.(*ArrayPredictionContext)
+ if !ok {
+ return false
+ }
+ if a.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(a.returnStates, other.returnStates) &&
+ slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+// Hash is the default hash function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Hash() int {
+ return a.BasePredictionContext.cachedHash
+}
+
+func (a *ArrayPredictionContext) String() string {
+ if a.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(a.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if a.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(a.returnStates[i])
+ if a.parents[i] != nil {
+ s = s + " " + a.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+
+ return s + "]"
+}
+
+// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
+// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
+// /
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
+func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
+ // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
+ // from it.
+ // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
+ // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
+ // either of them.
+
+ ac, ok1 := a.(*BaseSingletonPredictionContext)
+ bc, ok2 := b.(*BaseSingletonPredictionContext)
+
+ if ok1 && ok2 {
+ return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as wildcard
+ if rootIsWildcard {
+ if _, ok := a.(*EmptyPredictionContext); ok {
+ return a
+ }
+ if _, ok := b.(*EmptyPredictionContext); ok {
+ return b
+ }
+ }
+
+ // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
+ // here.
+ //
+ // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
+
+ var arp, arb *ArrayPredictionContext
+ var ok bool
+ if arp, ok = a.(*ArrayPredictionContext); ok {
+ } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ } else if _, ok = a.(*EmptyPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
+ }
+
+ if arb, ok = b.(*ArrayPredictionContext); ok {
+ } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ } else if _, ok = b.(*EmptyPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
+ }
+
+ // Both arp and arb
+ return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
+}
+
+// Merge two {@link SingletonBasePredictionContext} instances.
+//
+// Stack tops equal, parents merge is same; return left graph.
+//
+//
+// Same stack top, parents differ; merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// @param mergeCache
+// /
+func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.Hash(), b.Hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.Hash(), a.Hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent == a.parentCtx {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent == b.parentCtx {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array
+		// New joined parent, so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent PredictionContext
+ if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), apc)
+ }
+ return apc
+}
+
+// Handle case where at least one of {@code a} or {@code b} is
+// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
+// to represent {@link //EMPTY}.
+//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+//
+// Special case of last merge if local context.
+//
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+//
+//
+// Must keep all contexts; {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
+//
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// /
+func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
+ if rootIsWildcard {
+ if a == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // // + b =//
+ }
+ if b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // a +// =//
+ }
+ } else {
+ if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
+
+// Merge two {@link ArrayBasePredictionContext} instances.
+//
+// Different tops, different parents.
+//
+//
+// Shared top, same parents.
+//
+//
+// Shared top, different parents.
+//
+//
+// Shared top, all shared parents.
+//
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+// /
+func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.Hash(), b.Hash())
+ if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.Hash(), a.Hash())
+ if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous.(PredictionContext)
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: track whether this is possible above during merge sort for speed
+ // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
+ if M == a {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), a)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M == b {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), b)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), M)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
+
+// Make pass over all M {@code parents} merge any {@code equals()}
+// ones.
+// /
+func combineCommonParents(parents []PredictionContext) {
+ uniqueParents := make(map[PredictionContext]PredictionContext)
+
+ for p := 0; p < len(parents); p++ {
+ parent := parents[p]
+ if uniqueParents[parent] == nil {
+ uniqueParents[parent] = parent
+ }
+ }
+ for q := 0; q < len(parents); q++ {
+ parents[q] = uniqueParents[parents[q]]
+ }
+}
+
+func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
+
+ if context.isEmpty() {
+ return context
+ }
+ existing := visited[context]
+ if existing != nil {
+ return existing
+ }
+ existing = contextCache.Get(context)
+ if existing != nil {
+ visited[context] = existing
+ return existing
+ }
+ changed := false
+ parents := make([]PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || parent != context.GetParent(i) {
+ if !changed {
+ parents = make([]PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited[context] = context
+ return context
+ }
+ var updated PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited[updated] = updated
+ visited[context] = updated
+
+ return updated
+}
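The heart of mergeArrays above is a plain two-pointer merge over the two sorted returnStates arrays: equal payloads are emitted once (merging their parents), smaller payloads are copied through, and any remainder is appended. The standalone sketch below shows just that walk; it is illustrative only and not part of the vendored file, with plain ints standing in for the parent contexts and mergeSorted as a hypothetical name.

package main

import "fmt"

// mergeSorted merges two ascending int slices, collapsing equal payloads,
// mirroring the i/j/k walk in mergeArrays.
func mergeSorted(a, b []int) []int {
	merged := make([]int, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]: // same payload: emit once (the merge case)
			merged = append(merged, a[i])
			i++
			j++
		case a[i] < b[j]: // copy a[i] to the result
			merged = append(merged, a[i])
			i++
		default: // copy b[j] to the result
			merged = append(merged, b[j])
			j++
		}
	}
	merged = append(merged, a[i:]...) // copy over any remaining payloads
	merged = append(merged, b[j:]...)
	return merged
}

func main() {
	fmt.Println(mergeSorted([]int{1, 3, 7}, []int{3, 5})) // [1 3 5 7]
}

The trim step in mergeArrays exists because, as here, the merged result can come out shorter than len(a)+len(b) whenever payloads collide.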
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
new file mode 100644
index 000000000..7b9b72fab
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ //
+ // The SLL(*) prediction mode. This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // {@link //LL} prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the {@link //SLL} prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful {@link //LL} prediction abilities to complete successfully.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+ //
+ // The LL(*) prediction mode. This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // Report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+ //
+ // The LL(*) prediction mode with exact ambiguity detection. In addition to
+ // the correctness guarantees provided by the {@link //LL} prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
+
+// Computes the SLL prediction termination condition.
+//
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+//
+// COMBINED SLL+LL PARSING
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// HEURISTIC
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
+//
+// When the ATN simulation reaches the state before {@code ”}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative two has another way to continue,
+// via {@code [6|2|[]]}.
+//
+// It also lets us continue for this rule:
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// PURE SLL PARSING
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// PREDICATES IN SLL+LL PARSING
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x”} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+		// contexts if we can fail over to full LL; costs more time
+ // since we'll often fail over anyway.
+ if configs.HasSemanticContext() {
+ // dup configs, tossing out semantic predicates
+ dup := NewBaseATNConfigSet(false)
+ for _, c := range configs.GetItems() {
+
+ // NewBaseATNConfig({semanticContext:}, c)
+ c = NewBaseATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar preds
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// Checks if any configuration in {@code configs} is in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if any configuration in {@code configs} is in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if all configurations in {@code configs} are in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if all configurations in {@code configs} are in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
+
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Full LL prediction termination.
+//
+// Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations {@code C}, into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// CONTINUE/STOP RULE
+//
+// Continue if union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// CASES
+//
+//
+//
+// - no conflicts and more than 1 alternative in set => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1,3}} => continue
+//
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
+// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1}} => stop and predict 1
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {1}} = {@code {1}} => stop and predict 1, can announce
+// ambiguity {@code {1,2}}
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {2}} = {@code {1,2}} => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {3}} = {@code {1,3}} => continue
+//
+//
+//
+// EXACT AMBIGUITY DETECTION
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
+//
+// |A_i|>1 and
+// A_i = A_j for all i, j.
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// Determines if every alternative subset in {@code altsets} contains more
+// than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every {@link BitSet} in {@code altsets} has
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// Determines if any single alternative subset in {@code altsets} contains
+// exactly one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// Determines if any single alternative subset in {@code altsets} contains
+// more than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// Determines if every alternative subset in {@code altsets} is equivalent.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every member of {@code altsets} is equal to the
+// others, otherwise {@code false}
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the unique alternative predicted by all alternative subsets in
+// {@code altsets}. If no such alternative exists, this method returns
+// {@link ATN//INVALID_ALT_NUMBER}.
+//
+// @param altsets a collection of alternative subsets
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// Gets the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each {@link BitSet}
+// in {@code altsets}.
+//
+// @param altsets a collection of alternative subsets
+// @return the set of represented alternatives in {@code altsets}
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+// For each configuration {@code c} in {@code configs}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
+
+ for _, c := range configs.GetItems() {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
+// configuration {@code c} in {@code configs}:
+//
+//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+//
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.GetItems() {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
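To make the termination rules above concrete, the sketch below groups alternatives by (state, context) and then checks whether every subset resolves to the same minimum alternative, which is when prediction can stop. It is illustrative only, not the vendored API: config, altSubsets, and singleViableAlt are hypothetical stand-ins for ATNConfig, PredictionModegetConflictingAltSubsets, and PredictionModegetSingleViableAlt.

package main

import "fmt"

// config stands in for an ATNConfig: (state, alt, context stack).
type config struct {
	state int
	alt   int
	ctx   string
}

// altSubsets groups alternatives by (state, context); alt is deliberately
// excluded from the key, as in the A_s,ctx sets described above.
func altSubsets(configs []config) []map[int]bool {
	type key struct {
		state int
		ctx   string
	}
	groups := map[key]map[int]bool{}
	for _, c := range configs {
		k := key{c.state, c.ctx}
		if groups[k] == nil {
			groups[k] = map[int]bool{}
		}
		groups[k][c.alt] = true
	}
	out := make([]map[int]bool, 0, len(groups))
	for _, s := range groups {
		out = append(out, s)
	}
	return out
}

// singleViableAlt returns the minimum alternative shared by all subsets,
// or 0 when the minimums disagree (the "keep looking ahead" case).
func singleViableAlt(subsets []map[int]bool) int {
	result := 0
	for _, s := range subsets {
		m := 0
		for a := range s {
			if m == 0 || a < m {
				m = a
			}
		}
		if result == 0 {
			result = m
		} else if result != m {
			return 0 // more than one viable alternative remains
		}
	}
	return result
}

func main() {
	// (s,1,x), (s,2,x), (s',1,y), (s',2,y): both subsets reduce to {1},
	// so prediction stops and alternative 1 is chosen.
	cs := []config{{7, 1, "x"}, {7, 2, "x"}, {9, 1, "y"}, {9, 2, "y"}}
	fmt.Println(singleViableAlt(altSubsets(cs))) // 1
}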
diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
similarity index 70%
rename from vendor/github.com/antlr4-go/antlr/v4/recognizer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
index 2e0b504fb..bfe542d09 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
@@ -26,9 +26,6 @@ type Recognizer interface {
RemoveErrorListeners()
GetATN() *ATN
GetErrorListenerDispatch() ErrorListener
- HasError() bool
- GetError() RecognitionException
- SetError(RecognitionException)
}
type BaseRecognizer struct {
@@ -39,7 +36,6 @@ type BaseRecognizer struct {
LiteralNames []string
SymbolicNames []string
GrammarFileName string
- SynErr RecognitionException
}
func NewBaseRecognizer() *BaseRecognizer {
@@ -49,10 +45,7 @@ func NewBaseRecognizer() *BaseRecognizer {
return rec
}
-//goland:noinspection GoUnusedGlobalVariable
var tokenTypeMapCache = make(map[string]int)
-
-//goland:noinspection GoUnusedGlobalVariable
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
@@ -62,19 +55,7 @@ func (b *BaseRecognizer) checkVersion(toolVersion string) {
}
}
-func (b *BaseRecognizer) SetError(err RecognitionException) {
- b.SynErr = err
-}
-
-func (b *BaseRecognizer) HasError() bool {
- return b.SynErr != nil
-}
-
-func (b *BaseRecognizer) GetError() RecognitionException {
- return b.SynErr
-}
-
-func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
}
@@ -124,11 +105,9 @@ func (b *BaseRecognizer) SetState(v int) {
// return result
//}
-// GetRuleIndexMap Get a map from rule names to rule indexes.
+// Get a map from rule names to rule indexes.
//
-// Used for XPath and tree pattern compilation.
-//
-// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+// Used for XPath and tree pattern compilation.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
panic("Method not defined!")
@@ -145,8 +124,7 @@ func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
// return result
}
-// GetTokenType get the token type based upon its name
-func (b *BaseRecognizer) GetTokenType(_ string) int {
+func (b *BaseRecognizer) GetTokenType(tokenName string) int {
panic("Method not defined!")
// var ttype = b.GetTokenTypeMap()[tokenName]
// if (ttype !=nil) {
@@ -184,27 +162,26 @@ func (b *BaseRecognizer) GetTokenType(_ string) int {
// }
//}
-// GetErrorHeader returns the error header, normally line/character position information.
-//
-// Can be overridden in sub structs embedding BaseRecognizer.
+// What is the error header, normally line/character position information?//
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
line := e.GetOffendingToken().GetLine()
column := e.GetOffendingToken().GetColumn()
return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}
-// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+// How should a token be displayed in an error message? The default
//
-// The default is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
//
-// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
-// implementations of [ANTLRErrorStrategy] may provide a similar
-// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of {@link ANTLRErrorStrategy} may provide a similar
+// feature when necessary. For example, see
+// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
@@ -228,14 +205,12 @@ func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
return NewProxyErrorListener(b.listeners)
}
-// Sempred embedding structs need to override this if there are sempreds or actions
-// that the ATN interpreter needs to execute
-func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
return true
}
-// Precpred embedding structs need to override this if there are preceding predicates
-// that the ATN interpreter needs to execute
-func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
return true
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
new file mode 100644
index 000000000..210699ba2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with b context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked b. Contrast b with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
+
+type BaseRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+ rn := new(BaseRuleContext)
+
+ // What context invoked b rule?
+ rn.parentCtx = parent
+
+ // What state invoked the rule associated with b context?
+ // The "return address" is the followState of invokingState
+ // If parent is nil, b should be -1.
+ if parent == nil {
+ rn.invokingState = -1
+ } else {
+ rn.invokingState = invokingState
+ }
+
+ return rn
+}
+
+func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
+ return b
+}
+
+func (b *BaseRuleContext) SetParent(v Tree) {
+ if v == nil {
+ b.parentCtx = nil
+ } else {
+ b.parentCtx = v.(RuleContext)
+ }
+}
+
+func (b *BaseRuleContext) GetInvokingState() int {
+ return b.invokingState
+}
+
+func (b *BaseRuleContext) SetInvokingState(t int) {
+ b.invokingState = t
+}
+
+func (b *BaseRuleContext) GetRuleIndex() int {
+ return b.RuleIndex
+}
+
+func (b *BaseRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
+
+// A context is empty if there is no invoking state, meaning nobody called
+// the current context.
+func (b *BaseRuleContext) IsEmpty() bool {
+ return b.invokingState == -1
+}
+
+// Return the combined text of all child nodes. This method only considers
+// tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of b
+// method.
+//
+
+func (b *BaseRuleContext) GetParent() Tree {
+ return b.parentCtx
+}
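As a small illustration of the parent chain described above (hypothetical code, independent of the vendored types): the root context has invokingState -1, which is exactly what IsEmpty reports, and following parent links climbs the rule-invocation stack.

package main

import "fmt"

// node is a toy stand-in for BaseRuleContext: a parent link plus the state
// that invoked the rule, -1 at the root.
type node struct {
	parent        *node
	invokingState int
}

// isEmpty mirrors BaseRuleContext.IsEmpty: no invoking state means nobody
// called this context.
func (n *node) isEmpty() bool { return n.invokingState == -1 }

// depth climbs the parent links to the root, i.e. the rule-invocation stack.
func depth(n *node) int {
	d := 0
	for n != nil {
		d++
		n = n.parent
	}
	return d
}

func main() {
	root := &node{invokingState: -1}
	inner := &node{parent: root, invokingState: 42}
	fmt.Println(root.isEmpty(), inner.isEmpty(), depth(inner)) // true false 2
}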
diff --git a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
similarity index 92%
rename from vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
index 68cb9061e..a702e99de 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
@@ -9,13 +9,14 @@ import (
"strconv"
)
-// SemanticContext is a tree structure used to record the semantic context in which
+// A tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
//
-// an ATN configuration is valid. It's either a single predicate,
-// a conjunction p1 && p2, or a sum of products p1 || p2.
+//
+// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
//
-// I have scoped the AND, OR, and Predicate subclasses of
-// [SemanticContext] within the scope of this outer ``class''
+
type SemanticContext interface {
Equals(other Collectable[SemanticContext]) bool
Hash() int
@@ -79,7 +80,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
var SemanticContextNone = NewPredicate(-1, -1, false)
-func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
+func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
return p
}
@@ -197,7 +198,7 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -229,7 +230,9 @@ func NewAND(a, b SemanticContext) *AND {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- copy(opnds, vs)
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
and := new(AND)
and.opnds = opnds
@@ -313,12 +316,12 @@ func (a *AND) Hash() int {
return murmurFinish(h, len(a.opnds))
}
-func (o *OR) Hash() int {
- h := murmurInit(41) // Init with o value different from AND
- for _, op := range o.opnds {
+func (a *OR) Hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range a.opnds {
h = murmurUpdate(h, op.Hash())
}
- return murmurFinish(h, len(o.opnds))
+ return murmurFinish(h, len(a.opnds))
}
func (a *AND) String() string {
@@ -346,7 +349,7 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -379,7 +382,9 @@ func NewOR(a, b SemanticContext) *OR {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- copy(opnds, vs)
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
o := new(OR)
o.opnds = opnds
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
similarity index 86%
rename from vendor/github.com/antlr4-go/antlr/v4/token.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
index 9670efb82..f73b06bc6 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/token.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
@@ -35,8 +35,6 @@ type Token interface {
GetTokenSource() TokenSource
GetInputStream() CharStream
-
- String() string
}
type BaseToken struct {
@@ -55,7 +53,7 @@ type BaseToken struct {
const (
TokenInvalidType = 0
- // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
// and did not follow it despite needing to.
TokenEpsilon = -2
@@ -63,16 +61,15 @@ const (
TokenEOF = -1
- // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
- //
- // All tokens go to the parser (unless [Skip] is called in the lexer rule)
+ // All tokens go to the parser (unless Skip() is called in that rule)
// on a particular "channel". The parser tunes to a particular channel
// so that whitespace etc... can go to the parser on a "hidden" channel.
+
TokenDefaultChannel = 0
- // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel].
- //
- // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
+ // Anything on different channel than DEFAULT_CHANNEL is not parsed
+ // by parser.
+
TokenHiddenChannel = 1
)
@@ -121,22 +118,21 @@ func (b *BaseToken) GetInputStream() CharStream {
}
type CommonToken struct {
- BaseToken
+ *BaseToken
}
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
- t := &CommonToken{
- BaseToken: BaseToken{
- source: source,
- tokenType: tokenType,
- channel: channel,
- start: start,
- stop: stop,
- tokenIndex: -1,
- },
- }
+ t := new(CommonToken)
+
+ t.BaseToken = new(BaseToken)
+ t.source = source
+ t.tokenType = tokenType
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
if t.source.tokenSource != nil {
t.line = source.tokenSource.GetLine()
t.column = source.tokenSource.GetCharPositionInLine()
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/token_source.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
similarity index 90%
rename from vendor/github.com/antlr4-go/antlr/v4/token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
index bf4ff6633..1527d43f6 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
@@ -8,14 +8,13 @@ type TokenStream interface {
IntStream
LT(k int) Token
- Reset()
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
- GetTextFromInterval(Interval) string
+ GetTextFromInterval(*Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
similarity index 73%
rename from vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
index ccf59b465..b3e38af34 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
@@ -86,15 +86,14 @@ import (
// first example shows.
const (
- DefaultProgramName = "default"
- ProgramInitSize = 100
- MinTokenIndex = 0
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
)
// Define the rewrite operation hierarchy
type RewriteOperation interface {
-
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
Execute(buffer *bytes.Buffer) int
@@ -113,19 +112,19 @@ type RewriteOperation interface {
type BaseRewriteOperation struct {
//Current index of rewrites list
- instructionIndex int
+ instruction_index int
//Token buffer index
index int
//Substitution text
text string
//Actual operation name
- opName string
+ op_name string
//Pointer to token steam
tokens TokenStream
}
func (op *BaseRewriteOperation) GetInstructionIndex() int {
- return op.instructionIndex
+ return op.instruction_index
}
func (op *BaseRewriteOperation) GetIndex() int {
@@ -137,7 +136,7 @@ func (op *BaseRewriteOperation) GetText() string {
}
func (op *BaseRewriteOperation) GetOpName() string {
- return op.opName
+ return op.op_name
}
func (op *BaseRewriteOperation) GetTokens() TokenStream {
@@ -145,7 +144,7 @@ func (op *BaseRewriteOperation) GetTokens() TokenStream {
}
func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
- op.instructionIndex = val
+ op.instruction_index = val
}
func (op *BaseRewriteOperation) SetIndex(val int) {
@@ -157,20 +156,20 @@ func (op *BaseRewriteOperation) SetText(val string) {
}
func (op *BaseRewriteOperation) SetOpName(val string) {
- op.opName = val
+ op.op_name = val
}
func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
op.tokens = val
}
-func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
return op.index
}
func (op *BaseRewriteOperation) String() string {
return fmt.Sprintf("<%s@%d:\"%s\">",
- op.opName,
+ op.op_name,
op.tokens.Get(op.GetIndex()),
op.text,
)
@@ -183,10 +182,10 @@ type InsertBeforeOp struct {
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
- index: index,
- text: text,
- opName: "InsertBeforeOp",
- tokens: stream,
+ index: index,
+ text: text,
+ op_name: "InsertBeforeOp",
+ tokens: stream,
}}
}
@@ -202,21 +201,20 @@ func (op *InsertBeforeOp) String() string {
return op.BaseRewriteOperation.String()
}
-// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions
-// first and then the "insert before" instructions at same index. Implementation
-// of "insert after" is "insert before index+1".
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
type InsertAfterOp struct {
BaseRewriteOperation
}
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
- return &InsertAfterOp{
- BaseRewriteOperation: BaseRewriteOperation{
- index: index + 1,
- text: text,
- tokens: stream,
- },
- }
+ return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1,
+ text: text,
+ tokens: stream,
+ }}
}
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
@@ -231,7 +229,7 @@ func (op *InsertAfterOp) String() string {
return op.BaseRewriteOperation.String()
}
-// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct {
BaseRewriteOperation
@@ -241,10 +239,10 @@ type ReplaceOp struct {
func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
return &ReplaceOp{
BaseRewriteOperation: BaseRewriteOperation{
- index: from,
- text: text,
- opName: "ReplaceOp",
- tokens: stream,
+ index: from,
+ text: text,
+ op_name: "ReplaceOp",
+ tokens: stream,
},
LastIndex: to,
}
@@ -272,17 +270,17 @@ type TokenStreamRewriter struct {
// You may have multiple, named streams of rewrite operations.
// I'm calling these things "programs."
// Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- lastRewriteTokenIndexes map[string]int
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
}
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
return &TokenStreamRewriter{
tokens: tokens,
programs: map[string][]RewriteOperation{
- DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
},
- lastRewriteTokenIndexes: map[string]int{},
+ last_rewrite_token_indexes: map[string]int{},
}
}
@@ -293,110 +291,110 @@ func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
- is, ok := tsr.programs[programName]
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
+ is, ok := tsr.programs[program_name]
if ok {
- tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
}
}
-func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
- tsr.Rollback(DefaultProgramName, instructionIndex)
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
+ tsr.Rollback(Default_Program_Name, instruction_index)
}
-// DeleteProgram Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
- tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
+// Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}
func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
- tsr.DeleteProgram(DefaultProgramName)
+ tsr.DeleteProgram(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
// to insert after, just insert before next index (even if past end)
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(programName)
+ rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(programName, op)
+ tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
- tsr.InsertAfter(DefaultProgramName, index, text)
+ tsr.InsertAfter(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
- tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(programName)
+ rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(programName, op)
+ tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
- tsr.InsertBefore(DefaultProgramName, index, text)
+ tsr.InsertBefore(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
- tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
from, to, tsr.tokens.Size()))
}
var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
- rewrites := tsr.GetProgram(programName)
+ rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(programName, op)
+ tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
- tsr.Replace(DefaultProgramName, from, to, text)
+ tsr.Replace(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
tsr.ReplaceDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
- tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
- tsr.ReplaceToken(DefaultProgramName, from, to, text)
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
tsr.ReplaceTokenDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
- tsr.Replace(programName, from, to, "")
+func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
+ tsr.Replace(program_name, from, to, "")
}
func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
- tsr.Delete(DefaultProgramName, from, to)
+ tsr.Delete(Default_Program_Name, from, to)
}
func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
tsr.DeleteDefault(index, index)
}
-func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
- tsr.ReplaceToken(programName, from, to, "")
+func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
}
func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
- tsr.DeleteToken(DefaultProgramName, from, to)
+ tsr.DeleteToken(Default_Program_Name, from, to)
}
-func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
- i, ok := tsr.lastRewriteTokenIndexes[programName]
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
if !ok {
return -1
}
@@ -404,15 +402,15 @@ func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int
}
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
- return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
- tsr.lastRewriteTokenIndexes[programName] = i
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
+ tsr.last_rewrite_token_indexes[program_name] = i
}
func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
- is := make([]RewriteOperation, 0, ProgramInitSize)
+ is := make([]RewriteOperation, 0, Program_Init_Size)
tsr.programs[name] = is
return is
}
@@ -431,24 +429,24 @@ func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
return is
}
-// GetTextDefault returns the text from the original tokens altered per the
+// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string {
return tsr.GetText(
- DefaultProgramName,
+ Default_Program_Name,
NewInterval(0, tsr.tokens.Size()-1))
}
-// GetText returns the text from the original tokens altered per the
+// Return the text from the original tokens altered per the
// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
- rewrites := tsr.programs[programName]
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
start := interval.Start
stop := interval.Stop
// ensure start/end are in range
stop = min(stop, tsr.tokens.Size()-1)
start = max(start, 0)
- if len(rewrites) == 0 {
+ if rewrites == nil || len(rewrites) == 0 {
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
}
buf := bytes.Buffer{}
@@ -484,13 +482,11 @@ func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) s
return buf.String()
}
-// reduceToSingleOperationPerIndex combines operations and report invalid operations (like
-// overlapping replaces that are not completed nested). Inserts to
-// same index need to be combined etc...
-//
-// Here are the cases:
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
//
-// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.j.v leave alone, nonoverlapping
// I.i.u I.i.v combine: Iivu
//
// R.i-j.u R.x-y.v | i-j in x-y delete first R
@@ -502,38 +498,38 @@ func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) s
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
//
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
// R.x-y.v I.i.u | i in x-y ERROR
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
-// First we need to examine replaces. For any replace op:
+// First we need to examine replaces. For any replace op:
//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
//
-// Then we can deal with inserts:
+// Then we can deal with inserts:
//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this 'replace'.
-// 3. throw exception if index in same range as previous replace
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the 'replace' range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
//
-// The func returns a map from token index to operation.
+// Return a map from token index to operation.
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
// WALK REPLACES
for i := 0; i < len(rewrites); i++ {
@@ -551,7 +547,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
if iop.index == rop.index {
// E.g., insert before 2, delete 2..2; update replace
// text to include insert before, kill insert
- rewrites[iop.instructionIndex] = nil
+ rewrites[iop.instruction_index] = nil
if rop.text != "" {
rop.text = iop.text + rop.text
} else {
@@ -559,7 +555,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
}
} else if iop.index > rop.index && iop.index <= rop.LastIndex {
// delete insert as it's a no-op.
- rewrites[iop.instructionIndex] = nil
+ rewrites[iop.instruction_index] = nil
}
}
}
@@ -568,7 +564,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
if prevop, ok := rewrites[j].(*ReplaceOp); ok {
if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
// delete replace as it's a no-op.
- rewrites[prevop.instructionIndex] = nil
+ rewrites[prevop.instruction_index] = nil
continue
}
// throw exception unless disjoint or identical
@@ -576,9 +572,10 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
if prevop.text == "" && rop.text == "" && !disjoint {
- rewrites[prevop.instructionIndex] = nil
+ rewrites[prevop.instruction_index] = nil
rop.index = min(prevop.index, rop.index)
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
} else if !disjoint {
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
}
@@ -610,7 +607,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
if prevIop.index == iop.GetIndex() {
iop.SetText(iop.GetText() + prevIop.text)
- rewrites[prevIop.instructionIndex] = nil
+ rewrites[prevIop.instruction_index] = nil
}
}
}
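The comment block before reduceToSingleOperationPerIndex describes how overlapping instructions are folded into a single operation per token index. The I.i.u plus R.i-j.v case can be observed end to end with a sketch like the one below (same assumptions as the earlier sketch: illustrative names, and a stream with at least three tokens so Replace does not panic):

    package example

    import (
        antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    // combineOps queues an insert-before and a replace at the same index; the
    // reducer folds the insert text into the replace, so token 2 is rendered
    // as "<y" and only one operation survives for that index.
    func combineOps(tokens antlr.TokenStream) string {
        rw := antlr.NewTokenStreamRewriter(tokens)

        rw.InsertBeforeDefault(2, "<") // I.2.<
        rw.ReplaceDefaultPos(2, "y")   // R.2-2.y

        return rw.GetTextDefault() // "<y" appears where token 2's text was
    }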
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
similarity index 67%
rename from vendor/github.com/antlr4-go/antlr/v4/transition.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
index 313b0fc12..36be4f733 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/transition.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
@@ -72,7 +72,7 @@ func (t *BaseTransition) getSerializationType() int {
return t.serializationType
}
-func (t *BaseTransition) Matches(_, _, _ int) bool {
+func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
panic("Not implemented")
}
@@ -89,7 +89,6 @@ const (
TransitionPRECEDENCE = 10
)
-//goland:noinspection GoUnusedGlobalVariable
var TransitionserializationNames = []string{
"INVALID",
"EPSILON",
@@ -128,22 +127,19 @@ var TransitionserializationNames = []string{
// TransitionPRECEDENCE
//}
-// AtomTransition
// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
- t := &AtomTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionATOM,
- label: intervalSet,
- isEpsilon: false,
- },
- }
+
+ t := new(AtomTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.label = intervalSet // The token type or character value or, signifies special intervalSet.
t.intervalSet = t.makeLabel()
+ t.serializationType = TransitionATOM
return t
}
@@ -154,7 +150,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
return s
}
-func (t *AtomTransition) Matches(symbol, _, _ int) bool {
+func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label == symbol
}
@@ -163,45 +159,48 @@ func (t *AtomTransition) String() string {
}
type RuleTransition struct {
- BaseTransition
+ *BaseTransition
+
followState ATNState
ruleIndex, precedence int
}
func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
- return &RuleTransition{
- BaseTransition: BaseTransition{
- target: ruleStart,
- isEpsilon: true,
- serializationType: TransitionRULE,
- },
- ruleIndex: ruleIndex,
- precedence: precedence,
- followState: followState,
- }
+
+ t := new(RuleTransition)
+ t.BaseTransition = NewBaseTransition(ruleStart)
+
+ t.ruleIndex = ruleIndex
+ t.precedence = precedence
+ t.followState = followState
+ t.serializationType = TransitionRULE
+ t.isEpsilon = true
+
+ return t
}
-func (t *RuleTransition) Matches(_, _, _ int) bool {
+func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
type EpsilonTransition struct {
- BaseTransition
+ *BaseTransition
+
outermostPrecedenceReturn int
}
func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
- return &EpsilonTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionEPSILON,
- isEpsilon: true,
- },
- outermostPrecedenceReturn: outermostPrecedenceReturn,
- }
+
+ t := new(EpsilonTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionEPSILON
+ t.isEpsilon = true
+ t.outermostPrecedenceReturn = outermostPrecedenceReturn
+ return t
}
-func (t *EpsilonTransition) Matches(_, _, _ int) bool {
+func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@@ -210,20 +209,19 @@ func (t *EpsilonTransition) String() string {
}
type RangeTransition struct {
- BaseTransition
+ *BaseTransition
+
start, stop int
}
func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
- t := &RangeTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionRANGE,
- isEpsilon: false,
- },
- start: start,
- stop: stop,
- }
+
+ t := new(RangeTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionRANGE
+ t.start = start
+ t.stop = stop
t.intervalSet = t.makeLabel()
return t
}
@@ -234,7 +232,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet {
return s
}
-func (t *RangeTransition) Matches(symbol, _, _ int) bool {
+func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= t.start && symbol <= t.stop
}
@@ -254,41 +252,40 @@ type AbstractPredicateTransition interface {
}
type BaseAbstractPredicateTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
- return &BaseAbstractPredicateTransition{
- BaseTransition: BaseTransition{
- target: target,
- },
- }
+
+ t := new(BaseAbstractPredicateTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ return t
}
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
type PredicateTransition struct {
- BaseAbstractPredicateTransition
+ *BaseAbstractPredicateTransition
+
isCtxDependent bool
ruleIndex, predIndex int
}
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
- return &PredicateTransition{
- BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionPREDICATE,
- isEpsilon: true,
- },
- },
- isCtxDependent: isCtxDependent,
- ruleIndex: ruleIndex,
- predIndex: predIndex,
- }
+
+ t := new(PredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPREDICATE
+ t.ruleIndex = ruleIndex
+ t.predIndex = predIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
}
-func (t *PredicateTransition) Matches(_, _, _ int) bool {
+func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@@ -301,25 +298,26 @@ func (t *PredicateTransition) String() string {
}
type ActionTransition struct {
- BaseTransition
+ *BaseTransition
+
isCtxDependent bool
ruleIndex, actionIndex, predIndex int
}
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
- return &ActionTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionACTION,
- isEpsilon: true,
- },
- isCtxDependent: isCtxDependent,
- ruleIndex: ruleIndex,
- actionIndex: actionIndex,
- }
+
+ t := new(ActionTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionACTION
+ t.ruleIndex = ruleIndex
+ t.actionIndex = actionIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
}
-func (t *ActionTransition) Matches(_, _, _ int) bool {
+func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@@ -328,27 +326,26 @@ func (t *ActionTransition) String() string {
}
type SetTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
- t := &SetTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionSET,
- },
- }
+ t := new(SetTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionSET
if set != nil {
t.intervalSet = set
} else {
t.intervalSet = NewIntervalSet()
t.intervalSet.addOne(TokenInvalidType)
}
+
return t
}
-func (t *SetTransition) Matches(symbol, _, _ int) bool {
+func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.intervalSet.contains(symbol)
}
@@ -357,24 +354,16 @@ func (t *SetTransition) String() string {
}
type NotSetTransition struct {
- SetTransition
+ *SetTransition
}
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
- t := &NotSetTransition{
- SetTransition: SetTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionNOTSET,
- },
- },
- }
- if set != nil {
- t.intervalSet = set
- } else {
- t.intervalSet = NewIntervalSet()
- t.intervalSet.addOne(TokenInvalidType)
- }
+
+ t := new(NotSetTransition)
+
+ t.SetTransition = NewSetTransition(target, set)
+
+ t.serializationType = TransitionNOTSET
return t
}
@@ -388,16 +377,16 @@ func (t *NotSetTransition) String() string {
}
type WildcardTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewWildcardTransition(target ATNState) *WildcardTransition {
- return &WildcardTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionWILDCARD,
- },
- }
+
+ t := new(WildcardTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionWILDCARD
+ return t
}
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
@@ -409,24 +398,24 @@ func (t *WildcardTransition) String() string {
}
type PrecedencePredicateTransition struct {
- BaseAbstractPredicateTransition
+ *BaseAbstractPredicateTransition
+
precedence int
}
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
- return &PrecedencePredicateTransition{
- BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionPRECEDENCE,
- isEpsilon: true,
- },
- },
- precedence: precedence,
- }
+
+ t := new(PrecedencePredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPRECEDENCE
+ t.precedence = precedence
+ t.isEpsilon = true
+
+ return t
}
-func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
+func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
similarity index 62%
rename from vendor/github.com/antlr4-go/antlr/v4/tree.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
index c288420fb..85b4f137b 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/tree.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
@@ -21,23 +21,29 @@ type Tree interface {
type SyntaxTree interface {
Tree
- GetSourceInterval() Interval
+
+ GetSourceInterval() *Interval
}
type ParseTree interface {
SyntaxTree
+
Accept(Visitor ParseTreeVisitor) interface{}
GetText() string
+
ToStringTree([]string, Recognizer) string
}
type RuleNode interface {
ParseTree
+
GetRuleContext() RuleContext
+ GetBaseRuleContext() *BaseRuleContext
}
type TerminalNode interface {
ParseTree
+
GetSymbol() Token
}
@@ -58,12 +64,12 @@ type BaseParseTreeVisitor struct{}
var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
-func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
-// TODO: Implement this?
+// TODO
//func (this ParseTreeVisitor) Visit(ctx) {
// if (Utils.isArray(ctx)) {
// self := this
@@ -95,14 +101,15 @@ type BaseParseTreeListener struct{}
var _ ParseTreeListener = &BaseParseTreeListener{}
-func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {}
-func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
type TerminalNodeImpl struct {
parentCtx RuleContext
- symbol Token
+
+ symbol Token
}
var _ TerminalNode = &TerminalNodeImpl{}
@@ -116,7 +123,7 @@ func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
return tn
}
-func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
return nil
}
@@ -124,7 +131,7 @@ func (t *TerminalNodeImpl) GetChildren() []Tree {
return nil
}
-func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
panic("Cannot set children on terminal node")
}
@@ -144,7 +151,7 @@ func (t *TerminalNodeImpl) GetPayload() interface{} {
return t.symbol
}
-func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
if t.symbol == nil {
return TreeInvalidInterval
}
@@ -172,7 +179,7 @@ func (t *TerminalNodeImpl) String() string {
return t.symbol.GetText()
}
-func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
return t.String()
}
@@ -207,9 +214,10 @@ func NewParseTreeWalker() *ParseTreeWalker {
return new(ParseTreeWalker)
}
-// Walk performs a walk on the given parse tree starting at the root and going down recursively
-// with depth-first search. On each node, [EnterRule] is called before
-// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
+// Performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, EnterRule is called before
+// recursively walking down into child nodes, then
+// ExitRule is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
switch tt := t.(type) {
case ErrorNode:
@@ -226,7 +234,7 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
}
}
-// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
+// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
// then by triggering the event specific to the given parse tree node
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
ctx := r.GetRuleContext().(ParserRuleContext)
@@ -234,71 +242,12 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
ctx.EnterRule(listener)
}
-// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
-// then by triggering the generic event [ParseTreeListener].ExitEveryRule
+// Exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
ctx := r.GetRuleContext().(ParserRuleContext)
ctx.ExitRule(listener)
listener.ExitEveryRule(ctx)
}
-//goland:noinspection GoUnusedGlobalVariable
var ParseTreeWalkerDefault = NewParseTreeWalker()
-
-type IterativeParseTreeWalker struct {
- *ParseTreeWalker
-}
-
-//goland:noinspection GoUnusedExportedFunction
-func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
- return new(IterativeParseTreeWalker)
-}
-
-func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
- var stack []Tree
- var indexStack []int
- currentNode := t
- currentIndex := 0
-
- for currentNode != nil {
- // pre-order visit
- switch tt := currentNode.(type) {
- case ErrorNode:
- listener.VisitErrorNode(tt)
- case TerminalNode:
- listener.VisitTerminal(tt)
- default:
- i.EnterRule(listener, currentNode.(RuleNode))
- }
- // Move down to first child, if exists
- if currentNode.GetChildCount() > 0 {
- stack = append(stack, currentNode)
- indexStack = append(indexStack, currentIndex)
- currentIndex = 0
- currentNode = currentNode.GetChild(0)
- continue
- }
-
- for {
- // post-order visit
- if ruleNode, ok := currentNode.(RuleNode); ok {
- i.ExitRule(listener, ruleNode)
- }
- // No parent, so no siblings
- if len(stack) == 0 {
- currentNode = nil
- currentIndex = 0
- break
- }
- // Move to next sibling if possible
- currentIndex++
- if stack[len(stack)-1].GetChildCount() > currentIndex {
- currentNode = stack[len(stack)-1].GetChild(currentIndex)
- break
- }
- // No next, sibling, so move up
- currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
- currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
- }
- }
-}
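The Walk, EnterRule and ExitRule comments above describe the depth-first dispatch: EnterEveryRule fires before a rule node's children are visited, and ExitEveryRule fires after the recursion winds up. A hedged sketch of a listener built on BaseParseTreeListener and driven by ParseTreeWalkerDefault; the listener type is hypothetical and the parse tree is assumed to come from a generated parser:

    package example

    import (
        "fmt"

        antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    // depthListener overrides only the generic enter/exit hooks; the embedded
    // BaseParseTreeListener supplies no-op implementations of the rest.
    type depthListener struct {
        antlr.BaseParseTreeListener
        depth int
    }

    func (d *depthListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
        fmt.Printf("%*senter rule %d\n", d.depth*2, "", ctx.GetRuleIndex())
        d.depth++
    }

    func (d *depthListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
        d.depth--
    }

    // walkTree drives the listener over a parse tree produced elsewhere.
    func walkTree(tree antlr.Tree) {
        antlr.ParseTreeWalkerDefault.Walk(&depthListener{}, tree)
    }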
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
similarity index 81%
rename from vendor/github.com/antlr4-go/antlr/v4/trees.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
index f44c05d81..d7dbb0322 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/trees.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
@@ -8,8 +8,10 @@ import "fmt"
/** A set of utility routines useful for all kinds of ANTLR trees. */
-// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
-// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
+// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+//
+// node payloads to get the text for the nodes. Detect
+// parse trees and extract data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
if recog != nil {
@@ -30,7 +32,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
}
for i := 1; i < c; i++ {
s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
- res += " " + s
+ res += (" " + s)
}
res += ")"
return res
@@ -60,7 +62,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
}
}
- // no recognition for rule names
+ // no recog for rule names
payload := t.GetPayload()
if p2, ok := payload.(Token); ok {
return p2.GetText()
@@ -69,9 +71,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
return fmt.Sprint(t.GetPayload())
}
-// TreesGetChildren returns am ordered list of all children of this node
-//
-//goland:noinspection GoUnusedExportedFunction
+// Return ordered list of all children of this node
func TreesGetChildren(t Tree) []Tree {
list := make([]Tree, 0)
for i := 0; i < t.GetChildCount(); i++ {
@@ -80,10 +80,9 @@ func TreesGetChildren(t Tree) []Tree {
return list
}
-// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root
-// and the last node is the parent of this node.
+// Return a list of all ancestors of this node. The first node of
//
-//goland:noinspection GoUnusedExportedFunction
+// list is the root and the last is the parent of this node.
func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
@@ -95,12 +94,10 @@ func TreesgetAncestors(t Tree) []Tree {
return ancestors
}
-//goland:noinspection GoUnusedExportedFunction
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
}
-//goland:noinspection GoUnusedExportedFunction
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
return TreesfindAllNodes(t, ruleIndex, false)
}
@@ -132,7 +129,6 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr
}
}
-//goland:noinspection GoUnusedExportedFunction
func TreesDescendants(t ParseTree) []ParseTree {
nodes := []ParseTree{t}
for i := 0; i < t.GetChildCount(); i++ {
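Per the comment above, TreesStringTree renders a whole tree in LISP form, taking either an explicit ruleNames slice or a recognizer from which the rule names are resolved. A small sketch, assuming the rule names come from the recognizer (the wrapper name and the sample output are illustrative):

    package example

    import (
        antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    // dumpTree returns the LISP-form rendering of tree, e.g. something like
    // "(expr (term 1) + (term 2))" for a hypothetical grammar.
    func dumpTree(tree antlr.Tree, recog antlr.Recognizer) string {
        // With a non-nil recognizer, the explicit ruleNames slice may be nil.
        return antlr.TreesStringTree(tree, nil, recog)
    }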
diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
similarity index 85%
rename from vendor/github.com/antlr4-go/antlr/v4/utils.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
index 733d7df9d..9fad5d916 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/utils.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
@@ -9,10 +9,8 @@ import (
"errors"
"fmt"
"math/bits"
- "os"
"strconv"
"strings"
- "syscall"
)
func intMin(a, b int) int {
@@ -33,7 +31,7 @@ func intMax(a, b int) int {
type IntStack []int
-var ErrEmptyStack = errors.New("stack is empty")
+var ErrEmptyStack = errors.New("Stack is empty")
func (s *IntStack) Pop() (int, error) {
l := len(*s) - 1
@@ -49,13 +47,33 @@ func (s *IntStack) Push(e int) {
*s = append(*s, e)
}
+type comparable interface {
+ Equals(other Collectable[any]) bool
+}
+
+func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
+
+ return a.Equals(b)
+}
+
+func standardHashFunction(a interface{}) int {
+ if h, ok := a.(hasher); ok {
+ return h.Hash()
+ }
+
+ panic("Not Hasher")
+}
+
+type hasher interface {
+ Hash() int
+}
+
const bitsPerWord = 64
func indexForBit(bit int) int {
return bit / bitsPerWord
}
-//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
func wordForBit(data []uint64, bit int) uint64 {
idx := indexForBit(bit)
if idx >= len(data) {
@@ -76,8 +94,6 @@ type BitSet struct {
data []uint64
}
-// NewBitSet creates a new bitwise set
-// TODO: See if we can replace with the standard library's BitSet
func NewBitSet() *BitSet {
return &BitSet{}
}
@@ -107,7 +123,7 @@ func (b *BitSet) or(set *BitSet) {
setLen := set.minLen()
maxLen := intMax(bLen, setLen)
if maxLen > len(b.data) {
- // Increase the size of len(b.data) to represent the bits in both sets.
+ // Increase the size of len(b.data) to repesent the bits in both sets.
data := make([]uint64, maxLen)
copy(data, b.data)
b.data = data
@@ -230,6 +246,37 @@ func (a *AltDict) values() []interface{} {
return vs
}
+type DoubleDict struct {
+ data map[int]map[int]interface{}
+}
+
+func NewDoubleDict() *DoubleDict {
+ dd := new(DoubleDict)
+ dd.data = make(map[int]map[int]interface{})
+ return dd
+}
+
+func (d *DoubleDict) Get(a, b int) interface{} {
+ data := d.data[a]
+
+ if data == nil {
+ return nil
+ }
+
+ return data[b]
+}
+
+func (d *DoubleDict) set(a, b int, o interface{}) {
+ data := d.data[a]
+
+ if data == nil {
+ data = make(map[int]interface{})
+ d.data[a] = data
+ }
+
+ data[b] = o
+}
+
func EscapeWhitespace(s string, escapeSpaces bool) string {
s = strings.Replace(s, "\t", "\\t", -1)
@@ -241,7 +288,6 @@ func EscapeWhitespace(s string, escapeSpaces bool) string {
return s
}
-//goland:noinspection GoUnusedExportedFunction
func TerminalNodeToStringArray(sa []TerminalNode) []string {
st := make([]string, len(sa))
@@ -252,7 +298,6 @@ func TerminalNodeToStringArray(sa []TerminalNode) []string {
return st
}
-//goland:noinspection GoUnusedExportedFunction
func PrintArrayJavaStyle(sa []string) string {
var buffer bytes.Buffer
@@ -305,24 +350,3 @@ func murmurFinish(h int, numberOfWords int) int {
return int(hash)
}
-
-func isDirectory(dir string) (bool, error) {
- fileInfo, err := os.Stat(dir)
- if err != nil {
- switch {
- case errors.Is(err, syscall.ENOENT):
- // The given directory does not exist, so we will try to create it
- //
- err = os.MkdirAll(dir, 0755)
- if err != nil {
- return false, err
- }
-
- return true, nil
- case err != nil:
- return false, err
- default:
- }
- }
- return fileInfo.IsDir(), err
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
new file mode 100644
index 000000000..c9bd6751e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
@@ -0,0 +1,235 @@
+package antlr
+
+import "math"
+
+const (
+ _initalCapacity = 16
+ _initalBucketCapacity = 8
+ _loadFactor = 0.75
+)
+
+type Set interface {
+ Add(value interface{}) (added interface{})
+ Len() int
+ Get(value interface{}) (found interface{})
+ Contains(value interface{}) bool
+ Values() []interface{}
+ Each(f func(interface{}) bool)
+}
+
+type array2DHashSet struct {
+ buckets [][]Collectable[any]
+ hashcodeFunction func(interface{}) int
+ equalsFunction func(Collectable[any], Collectable[any]) bool
+
+ n int // How many elements in set
+ threshold int // when to expand
+
+ currentPrime int // jump by 4 primes each expand or whatever
+ initialBucketCapacity int
+}
+
+func (as *array2DHashSet) Each(f func(interface{}) bool) {
+ if as.Len() < 1 {
+ return
+ }
+
+ for _, bucket := range as.buckets {
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+ if !f(o) {
+ return
+ }
+ }
+ }
+}
+
+func (as *array2DHashSet) Values() []interface{} {
+ if as.Len() < 1 {
+ return nil
+ }
+
+ values := make([]interface{}, 0, as.Len())
+ as.Each(func(i interface{}) bool {
+ values = append(values, i)
+ return true
+ })
+ return values
+}
+
+func (as *array2DHashSet) Contains(value Collectable[any]) bool {
+ return as.Get(value) != nil
+}
+
+func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
+ if as.n > as.threshold {
+ as.expand()
+ }
+ return as.innerAdd(value)
+}
+
+func (as *array2DHashSet) expand() {
+ old := as.buckets
+
+ as.currentPrime += 4
+
+ var (
+ newCapacity = len(as.buckets) << 1
+ newTable = as.createBuckets(newCapacity)
+ newBucketLengths = make([]int, len(newTable))
+ )
+
+ as.buckets = newTable
+ as.threshold = int(float64(newCapacity) * _loadFactor)
+
+ for _, bucket := range old {
+ if bucket == nil {
+ continue
+ }
+
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+
+ b := as.getBuckets(o)
+ bucketLength := newBucketLengths[b]
+ var newBucket []Collectable[any]
+ if bucketLength == 0 {
+ // new bucket
+ newBucket = as.createBucket(as.initialBucketCapacity)
+ newTable[b] = newBucket
+ } else {
+ newBucket = newTable[b]
+ if bucketLength == len(newBucket) {
+ // expand
+ newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
+ copy(newBucketCopy[:bucketLength], newBucket)
+ newBucket = newBucketCopy
+ newTable[b] = newBucket
+ }
+ }
+
+ newBucket[bucketLength] = o
+ newBucketLengths[b]++
+ }
+ }
+}
+
+func (as *array2DHashSet) Len() int {
+ return as.n
+}
+
+func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
+ if o == nil {
+ return nil
+ }
+
+ b := as.getBuckets(o)
+ bucket := as.buckets[b]
+ if bucket == nil { // no bucket
+ return nil
+ }
+
+ for _, e := range bucket {
+ if e == nil {
+ return nil // empty slot; not there
+ }
+ if as.equalsFunction(e, o) {
+ return e
+ }
+ }
+
+ return nil
+}
+
+func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
+ b := as.getBuckets(o)
+
+ bucket := as.buckets[b]
+
+ // new bucket
+ if bucket == nil {
+ bucket = as.createBucket(as.initialBucketCapacity)
+ bucket[0] = o
+
+ as.buckets[b] = bucket
+ as.n++
+ return o
+ }
+
+ // look for it in bucket
+ for i := 0; i < len(bucket); i++ {
+ existing := bucket[i]
+ if existing == nil { // empty slot; not there, add.
+ bucket[i] = o
+ as.n++
+ return o
+ }
+
+ if as.equalsFunction(existing, o) { // found existing, quit
+ return existing
+ }
+ }
+
+ // full bucket, expand and add to end
+ oldLength := len(bucket)
+ bucketCopy := make([]Collectable[any], oldLength<<1)
+ copy(bucketCopy[:oldLength], bucket)
+ bucket = bucketCopy
+ as.buckets[b] = bucket
+ bucket[oldLength] = o
+ as.n++
+ return o
+}
+
+func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
+ hash := as.hashcodeFunction(value)
+ return hash & (len(as.buckets) - 1)
+}
+
+func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
+ return make([][]Collectable[any], cap)
+}
+
+func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
+ return make([]Collectable[any], cap)
+}
+
+func newArray2DHashSetWithCap(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
+ initCap int,
+ initBucketCap int,
+) *array2DHashSet {
+ if hashcodeFunction == nil {
+ hashcodeFunction = standardHashFunction
+ }
+
+ if equalsFunction == nil {
+ equalsFunction = standardEqualsFunction
+ }
+
+ ret := &array2DHashSet{
+ hashcodeFunction: hashcodeFunction,
+ equalsFunction: equalsFunction,
+
+ n: 0,
+ threshold: int(math.Floor(_initalCapacity * _loadFactor)),
+
+ currentPrime: 1,
+ initialBucketCapacity: initBucketCap,
+ }
+
+ ret.buckets = ret.createBuckets(initCap)
+ return ret
+}
+
+func newArray2DHashSet(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
+) *array2DHashSet {
+ return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
+}
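array2DHashSet and its constructors are unexported, so this bucketed set is only reachable from inside the antlr package. A rough package-internal sketch, assuming the Collectable[any] interface from the vendored jcollect.go requires exactly Hash() int and Equals(Collectable[any]) bool (which is what standardHashFunction and standardEqualsFunction in utils.go rely on); the intKey type and exampleSetUsage function are hypothetical:

    package antlr

    // intKey is a hypothetical element type; Hash and Equals are all that the
    // standard hash/equals helpers look for.
    type intKey int

    func (k intKey) Hash() int { return int(k) }

    func (k intKey) Equals(other Collectable[any]) bool {
        o, ok := other.(intKey)
        return ok && o == k
    }

    func exampleSetUsage() int {
        // nil selects standardHashFunction and standardEqualsFunction.
        s := newArray2DHashSet(nil, nil)
        s.Add(intKey(1))
        s.Add(intKey(2))
        s.Add(intKey(1)) // duplicate: Add returns the existing element
        return s.Len()   // 2
    }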
diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
deleted file mode 100644
index 38ea34ff5..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/.gitignore
+++ /dev/null
@@ -1,18 +0,0 @@
-### Go template
-
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-
-# Go workspace file
-go.work
-
-# No Goland stuff in this repo
-.idea
diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
deleted file mode 100644
index a22292eb5..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-3. Neither name of copyright holders nor the names of its contributors
-may be used to endorse or promote products derived from this software
-without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md
deleted file mode 100644
index 03e5b83eb..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr)
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr)
-[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest)
-[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest)
-[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity)
-[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
-[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/)
-# ANTLR4 Go Runtime Module Repo
-
-IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
-
- - Do not submit PRs or any change requests to this repo
- - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
- - This repo contains the Go runtime that your generated projects should import
-
-## Introduction
-
-This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
-at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
-the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
-
-The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
-
-### Why?
-
-The `go get` command is unable to retrieve the Go runtime when it is embedded so
-deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
-does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
-causes confusion.
-
-For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as:
-
-```sh
-require (
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
-)
-```
-
-Where you would expect to see:
-
-```sh
-require (
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
-)
-```
-
-The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and
-from whence users can expect `go get` to behave as expected.
-
-
-# Documentation
-Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
-migrating existing projects to use the new module location and for information on how to use the Go runtime in
-general.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
deleted file mode 100644
index 3bb4fd7c4..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
-Package antlr implements the Go version of the ANTLR 4 runtime.
-
-# The ANTLR Tool
-
-ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
-or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
-From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
-(or visitor) that makes it easy to respond to the recognition of phrases of interest.
-
-# Go Runtime
-
-At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
-source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
-ANTLR4 that it is compatible with (I.E. uses the /v4 path).
-
-However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
-of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
-This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
-list the release tag such as @4.12.0 - this was confusing, to say the least.
-
-As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
-(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
-which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
-
-This means that if you are using the source code without modules, you should also use the source code in the [new repo].
-Though we highly recommend that you use go modules, as they are now idiomatic for Go.
-
-I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
-
-Go runtime author: [Jim Idle] jimi@idle.ws
-
-# Code Generation
-
-ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
-runtime library, written specifically to support the generated code in the target language. This library is the
-runtime for the Go target.
-
-To generate code for the go target, it is generally recommended to place the source grammar files in a package of
-their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
-it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
-that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
-way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
-your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
-it was at any point in its history.
-
-Here is a general/recommended template for an ANTLR based recognizer in Go:
-
- .
- ├── parser
- │ ├── mygrammar.g4
- │ ├── antlr-4.12.1-complete.jar
- │ ├── generate.go
- │ └── generate.sh
- ├── parsing - generated code goes here
- │ └── error_listeners.go
- ├── go.mod
- ├── go.sum
- ├── main.go
- └── main_test.go
-
-Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
-
-The generate.go file then looks like this:
-
- package parser
-
- //go:generate ./generate.sh
-
-And the generate.sh file will look similar to this:
-
- #!/bin/sh
-
- alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
- antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
-
-depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here
-is to generate the code into a
-
-From the command line at the root of your source package (location of go.mo)d) you can then simply issue the command:
-
- go generate ./...
-
-Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
-by importing the parsing package.
-
-There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
-
-# Copyright Notice
-
-Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
-
-Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
-
-[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
-[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
-[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
-[new repo]: https://github.com/antlr4-go/antlr
-[Jim Idle]: https://github.com/jimidle
-[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
-*/
-package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
deleted file mode 100644
index a83f25d34..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-const (
- lexerConfig = iota // Indicates that this ATNConfig is for a lexer
- parserConfig // Indicates that this ATNConfig is for a parser
-)
-
-// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
-// context). The syntactic context is a graph-structured stack node whose
-// path(s) to the root is the rule invocation(s) chain used to arrive in the
-// state. The semantic context is the tree of semantic predicates encountered
-// before reaching an ATN state.
-type ATNConfig struct {
- precedenceFilterSuppressed bool
- state ATNState
- alt int
- context *PredictionContext
- semanticContext SemanticContext
- reachesIntoOuterContext int
- cType int // lexerConfig or parserConfig
- lexerActionExecutor *LexerActionExecutor
- passedThroughNonGreedyDecision bool
-}
-
-// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
-func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
- return NewATNConfig5(state, alt, context, SemanticContextNone)
-}
-
-// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
-func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil") // TODO: Necessary?
- }
-
- pac := &ATNConfig{}
- pac.state = state
- pac.alt = alt
- pac.context = context
- pac.semanticContext = semanticContext
- pac.cType = parserConfig
- return pac
-}
-
-// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
-func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
- return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
-}
-
-// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
-func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
- return NewATNConfig(c, state, c.GetContext(), semanticContext)
-}
-
-// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
-func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
- return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
-}
-
-// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
-func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
- return NewATNConfig(c, state, context, c.GetSemanticContext())
-}
-
-// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
-// are just wrappers around this one.
-func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
- }
- b := &ATNConfig{}
- b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
- b.cType = parserConfig
- return b
-}
-
-func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
-
- a.state = state
- a.alt = alt
- a.context = context
- a.semanticContext = semanticContext
- a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
- a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
-}
-
-func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
- return a.precedenceFilterSuppressed
-}
-
-func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
- a.precedenceFilterSuppressed = v
-}
-
-// GetState returns the ATN state associated with this configuration
-func (a *ATNConfig) GetState() ATNState {
- return a.state
-}
-
-// GetAlt returns the alternative associated with this configuration
-func (a *ATNConfig) GetAlt() int {
- return a.alt
-}
-
-// SetContext sets the rule invocation stack associated with this configuration
-func (a *ATNConfig) SetContext(v *PredictionContext) {
- a.context = v
-}
-
-// GetContext returns the rule invocation stack associated with this configuration
-func (a *ATNConfig) GetContext() *PredictionContext {
- return a.context
-}
-
-// GetSemanticContext returns the semantic context associated with this configuration
-func (a *ATNConfig) GetSemanticContext() SemanticContext {
- return a.semanticContext
-}
-
-// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
-func (a *ATNConfig) GetReachesIntoOuterContext() int {
- return a.reachesIntoOuterContext
-}
-
-// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
-func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
- a.reachesIntoOuterContext = v
-}
-
-// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
-// for a collection.
-//
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
- switch a.cType {
- case lexerConfig:
- return a.LEquals(o)
- case parserConfig:
- return a.PEquals(o)
- default:
- panic("Invalid ATNConfig type")
- }
-}
-
-// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
-// for a collection.
-//
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
- var other, ok = o.(*ATNConfig)
-
- if !ok {
- return false
- }
- if a == other {
- return true
- } else if other == nil {
- return false
- }
-
- var equal bool
-
- if a.context == nil {
- equal = other.context == nil
- } else {
- equal = a.context.Equals(other.context)
- }
-
- var (
- nums = a.state.GetStateNumber() == other.state.GetStateNumber()
- alts = a.alt == other.alt
- cons = a.semanticContext.Equals(other.semanticContext)
- sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
- )
-
- return nums && alts && cons && sups && equal
-}
-
-// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
-// is required for a collection
-func (a *ATNConfig) Hash() int {
- switch a.cType {
- case lexerConfig:
- return a.LHash()
- case parserConfig:
- return a.PHash()
- default:
- panic("Invalid ATNConfig type")
- }
-}
-
-// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
-// is required for a collection
-func (a *ATNConfig) PHash() int {
- var c int
- if a.context != nil {
- c = a.context.Hash()
- }
-
- h := murmurInit(7)
- h = murmurUpdate(h, a.state.GetStateNumber())
- h = murmurUpdate(h, a.alt)
- h = murmurUpdate(h, c)
- h = murmurUpdate(h, a.semanticContext.Hash())
- return murmurFinish(h, 4)
-}
-
-// String returns a string representation of the ATNConfig, usually used for debugging purposes
-func (a *ATNConfig) String() string {
- var s1, s2, s3 string
-
- if a.context != nil {
- s1 = ",[" + fmt.Sprint(a.context) + "]"
- }
-
- if a.semanticContext != SemanticContextNone {
- s2 = "," + fmt.Sprint(a.semanticContext)
- }
-
- if a.reachesIntoOuterContext > 0 {
- s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
- }
-
- return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
-}
-
-func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
- lac := &ATNConfig{}
- lac.state = state
- lac.alt = alt
- lac.context = context
- lac.semanticContext = SemanticContextNone
- lac.cType = lexerConfig
- return lac
-}
-
-func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
- lac := &ATNConfig{}
- lac.lexerActionExecutor = c.lexerActionExecutor
- lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
- lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
- lac.cType = lexerConfig
- return lac
-}
-
-func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
- lac := &ATNConfig{}
- lac.lexerActionExecutor = lexerActionExecutor
- lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
- lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
- lac.cType = lexerConfig
- return lac
-}
-
-func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
- lac := &ATNConfig{}
- lac.lexerActionExecutor = c.lexerActionExecutor
- lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
- lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
- lac.cType = lexerConfig
- return lac
-}
-
-//goland:noinspection GoUnusedExportedFunction
-func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
- lac := &ATNConfig{}
- lac.state = state
- lac.alt = alt
- lac.context = context
- lac.semanticContext = SemanticContextNone
- lac.cType = lexerConfig
- return lac
-}
-
-// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (a *ATNConfig) LHash() int {
- var f int
- if a.passedThroughNonGreedyDecision {
- f = 1
- } else {
- f = 0
- }
- h := murmurInit(7)
- h = murmurUpdate(h, a.state.GetStateNumber())
- h = murmurUpdate(h, a.alt)
- h = murmurUpdate(h, a.context.Hash())
- h = murmurUpdate(h, a.semanticContext.Hash())
- h = murmurUpdate(h, f)
- h = murmurUpdate(h, a.lexerActionExecutor.Hash())
- h = murmurFinish(h, 6)
- return h
-}
-
-// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
- var otherT, ok = other.(*ATNConfig)
- if !ok {
- return false
- } else if a == otherT {
- return true
- } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
- return false
- }
-
- switch {
- case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
- return true
- case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
- if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
- return false
- }
- default:
- return false // One but not both, are nil
- }
-
- return a.PEquals(otherT)
-}
-
-func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
- var ds, ok = target.(DecisionState)
-
- return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
deleted file mode 100644
index 52dbaf806..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-// ATNConfigSet is a specialized set of ATNConfig that tracks information
-// about its elements and can combine similar configurations using a
-// graph-structured stack.
-type ATNConfigSet struct {
- cachedHash int
-
- // configLookup is used to determine whether two ATNConfigSets are equal. We
- // need all configurations with the same (s, i, _, semctx) to be equal. A key
- // effectively doubles the number of objects associated with ATNConfigs. All
- // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
- // read-only because a set becomes a DFA state.
- configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
-
- // configs is the added elements that did not match an existing key in configLookup
- configs []*ATNConfig
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves re-computation. Can we track conflicts as they
- // are added to save scanning configs later?
- conflictingAlts *BitSet
-
- // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
- // we hit a pred while computing a closure operation. Do not make a DFA state
- // from the ATNConfigSet in this case. TODO: How is this used by parsers?
- dipsIntoOuterContext bool
-
- // fullCtx is whether it is part of a full context LL prediction. Used to
- // determine how to merge $. It is a wildcard with SLL, but not for an LL
- // context merge.
- fullCtx bool
-
- // Used in parser and lexer. In lexer, it indicates we hit a pred
- // while computing a closure operation. Don't make a DFA state from this set.
- hasSemanticContext bool
-
-	// readOnly is whether the set is read-only. If true, do not allow any code to
-	// manipulate the set because DFA states will point at sets and those must not
-	// change. This does not protect the other fields; conflictingAlts in particular
-	// is assigned after readOnly.
- readOnly bool
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves re-computation. Can we track conflicts as they
- // are added to save scanning configs later?
- uniqueAlt int
-}
-
-// Alts returns the combined set of alts for all the configurations in this set.
-func (b *ATNConfigSet) Alts() *BitSet {
- alts := NewBitSet()
- for _, it := range b.configs {
- alts.add(it.GetAlt())
- }
- return alts
-}
-
-// NewATNConfigSet creates a new ATNConfigSet instance.
-func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
- return &ATNConfigSet{
- cachedHash: -1,
- configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
- fullCtx: fullCtx,
- }
-}
-
-// Add merges contexts with existing configs for (s, i, pi, _),
-// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
-// 'pi' is the [ATNConfig].semanticContext.
-//
-// We use (s,i,pi) as the key.
-// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
-func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if config.GetSemanticContext() != SemanticContextNone {
- b.hasSemanticContext = true
- }
-
- if config.GetReachesIntoOuterContext() > 0 {
- b.dipsIntoOuterContext = true
- }
-
- existing, present := b.configLookup.Put(config)
-
- // The config was not already in the set
- //
- if !present {
- b.cachedHash = -1
- b.configs = append(b.configs, config) // Track order here
- return true
- }
-
- // Merge a previous (s, i, pi, _) with it and save the result
- rootIsWildcard := !b.fullCtx
- merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
-
- // No need to check for existing.context because config.context is in the cache,
- // since the only way to create new graphs is the "call rule" and here. We cache
- // at both places.
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
-
- // Preserve the precedence filter suppression during the merge
- if config.getPrecedenceFilterSuppressed() {
- existing.setPrecedenceFilterSuppressed(true)
- }
-
- // Replace the context because there is no need to do alt mapping
- existing.SetContext(merged)
-
- return true
-}
-
-// GetStates returns the set of states represented by all configurations in this config set
-func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
-
- // states uses the standard comparator and Hash() provided by the ATNState instance
- //
- states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
-
- for i := 0; i < len(b.configs); i++ {
- states.Put(b.configs[i].GetState())
- }
-
- return states
-}
-
-func (b *ATNConfigSet) GetPredicates() []SemanticContext {
- predicates := make([]SemanticContext, 0)
-
- for i := 0; i < len(b.configs); i++ {
- c := b.configs[i].GetSemanticContext()
-
- if c != SemanticContextNone {
- predicates = append(predicates, c)
- }
- }
-
- return predicates
-}
-
-func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
- if b.readOnly {
- panic("set is read-only")
- }
-
-	// Empty indicates that no optimization is possible
- if b.configLookup == nil || b.configLookup.Len() == 0 {
- return
- }
-
- for i := 0; i < len(b.configs); i++ {
- config := b.configs[i]
- config.SetContext(interpreter.getCachedContext(config.GetContext()))
- }
-}
-
-func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
- for i := 0; i < len(coll); i++ {
- b.Add(coll[i], nil)
- }
-
- return false
-}
-
-// Compare returns true only if the configs are in the same order and each pair's Equals function returns true.
-// Java uses ArrayList.equals(), which requires the same order.
-func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
- if len(b.configs) != len(bs.configs) {
- return false
- }
- for i := 0; i < len(b.configs); i++ {
- if !b.configs[i].Equals(bs.configs[i]) {
- return false
- }
- }
-
- return true
-}
-
-func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
- if b == other {
- return true
- } else if _, ok := other.(*ATNConfigSet); !ok {
- return false
- }
-
- other2 := other.(*ATNConfigSet)
- var eca bool
- switch {
- case b.conflictingAlts == nil && other2.conflictingAlts == nil:
- eca = true
- case b.conflictingAlts != nil && other2.conflictingAlts != nil:
- eca = b.conflictingAlts.equals(other2.conflictingAlts)
- }
- return b.configs != nil &&
- b.fullCtx == other2.fullCtx &&
- b.uniqueAlt == other2.uniqueAlt &&
- eca &&
- b.hasSemanticContext == other2.hasSemanticContext &&
- b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
- b.Compare(other2)
-}
-
-func (b *ATNConfigSet) Hash() int {
- if b.readOnly {
- if b.cachedHash == -1 {
- b.cachedHash = b.hashCodeConfigs()
- }
-
- return b.cachedHash
- }
-
- return b.hashCodeConfigs()
-}
-
-func (b *ATNConfigSet) hashCodeConfigs() int {
- h := 1
- for _, config := range b.configs {
- h = 31*h + config.Hash()
- }
- return h
-}
-
-func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
- if b.readOnly {
- panic("not implemented for read-only sets")
- }
- if b.configLookup == nil {
- return false
- }
- return b.configLookup.Contains(item)
-}
-
-func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
- return b.Contains(item)
-}
-
-func (b *ATNConfigSet) Clear() {
- if b.readOnly {
- panic("set is read-only")
- }
- b.configs = make([]*ATNConfig, 0)
- b.cachedHash = -1
- b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
-}
-
-func (b *ATNConfigSet) String() string {
-
- s := "["
-
- for i, c := range b.configs {
- s += c.String()
-
- if i != len(b.configs)-1 {
- s += ", "
- }
- }
-
- s += "]"
-
- if b.hasSemanticContext {
- s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
- }
-
- if b.uniqueAlt != ATNInvalidAltNumber {
- s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
- }
-
- if b.conflictingAlts != nil {
- s += ",conflictingAlts=" + b.conflictingAlts.String()
- }
-
- if b.dipsIntoOuterContext {
- s += ",dipsIntoOuterContext"
- }
-
- return s
-}
-
-// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
-// for use in lexers.
-func NewOrderedATNConfigSet() *ATNConfigSet {
- return &ATNConfigSet{
- cachedHash: -1,
- // This set uses the standard Hash() and Equals() from ATNConfig
- configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
- fullCtx: false,
- }
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
deleted file mode 100644
index c2b724514..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/configuration.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package antlr
-
-type runtimeConfiguration struct {
- statsTraceStacks bool
- lexerATNSimulatorDebug bool
- lexerATNSimulatorDFADebug bool
- parserATNSimulatorDebug bool
- parserATNSimulatorTraceATNSim bool
- parserATNSimulatorDFADebug bool
- parserATNSimulatorRetryDebug bool
- lRLoopEntryBranchOpt bool
- memoryManager bool
-}
-
-// Global runtime configuration
-var runtimeConfig = runtimeConfiguration{
- lRLoopEntryBranchOpt: true,
-}
-
-type runtimeOption func(*runtimeConfiguration) error
-
-// ConfigureRuntime allows the runtime to be configured globally, setting things like trace and statistics options.
-// It uses the functional options pattern for Go. This is a package-global function as it operates on the runtime
-// configuration regardless of the instantiation of anything higher up, such as a parser or lexer. Generally this is
-// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
-// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
-// memory allocation strategy used by the runtime, such as whether to use sync.Pool.
-//
-// The options are applied in the order they are passed in, so the last option will override any previous options.
-//
-// For example, if you want to turn on the collection create point stack flag to true, you can do:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
-//
-// If you want to turn it off, you can do:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
-func ConfigureRuntime(options ...runtimeOption) error {
- for _, option := range options {
- err := option(&runtimeConfig)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
-// certain structs, such as collections, or the use point of certain methods such as Put().
-// Because this can be expensive, it is turned off by default. However, it
-// can be useful to track down exactly where memory is being created and used.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
-func WithStatsTraceStacks(trace bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.statsTraceStacks = trace
- return nil
- }
-}
-
-// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
-// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
-func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.lexerATNSimulatorDebug = debug
- return nil
- }
-}
-
-// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
-// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
-func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.lexerATNSimulatorDFADebug = debug
- return nil
- }
-}
-
-// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
-// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
-func WithParserATNSimulatorDebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorDebug = debug
- return nil
- }
-}
-
-// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
-// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
-func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorTraceATNSim = trace
- return nil
- }
-}
-
-// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
-// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
-func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorDFADebug = debug
- return nil
- }
-}
-
-// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
-// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
-// Only useful to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
-func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorRetryDebug = debug
- return nil
- }
-}
-
-// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
-// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
-// Passing false turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
-//
-// Note that the default is to use this optimization.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
-func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.lRLoopEntryBranchOpt = off
- return nil
- }
-}
-
-// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
-// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
-// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
-// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
-// grammar, this may help make it more practical.
-//
-// Note that the default is to use normal Go memory allocation and not pool memory.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
-//
-// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
-// and should remember to nil out any references to the parser or lexer when you are done with them.
-func WithMemoryManager(use bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.memoryManager = use
- return nil
- }
-}
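
Note on the options API removed above: the runtime configuration uses the standard Go functional-options pattern, so several settings can be applied in one call. A minimal, illustrative sketch follows (this ConfigureRuntime API exists only in the antlr4-go runtime deleted by this patch, not in the older github.com/antlr/antlr4 runtime it restores):

    package main

    import "github.com/antlr4-go/antlr/v4"

    func main() {
        // Options are applied in order, so later options override earlier ones.
        _ = antlr.ConfigureRuntime(
            antlr.WithStatsTraceStacks(true),        // record creation stacks for collections
            antlr.WithLexerATNSimulatorDebug(false), // leave lexer ATN debug logging off
        )
    }
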
diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
deleted file mode 100644
index b737fe85f..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bufio"
- "io"
-)
-
-type InputStream struct {
- name string
- index int
- data []rune
- size int
-}
-
-// NewIoStream creates a new input stream from the given io.Reader reader.
-// Note that the reader is read completely into memory and so it must actually
-// have a stopping point - you cannot pass in a reader on an open-ended source such
-// as a socket for instance.
-func NewIoStream(reader io.Reader) *InputStream {
-
- rReader := bufio.NewReader(reader)
-
- is := &InputStream{
- name: "",
- index: 0,
- }
-
- // Pre-build the buffer and read runes reasonably efficiently given that
- // we don't exactly know how big the input is.
- //
- is.data = make([]rune, 0, 512)
- for {
- r, _, err := rReader.ReadRune()
- if err != nil {
- break
- }
- is.data = append(is.data, r)
- }
- is.size = len(is.data) // number of runes
- return is
-}
-
-// NewInputStream creates a new input stream from the given string
-func NewInputStream(data string) *InputStream {
-
- is := &InputStream{
- name: "",
- index: 0,
- data: []rune(data), // This is actually the most efficient way
- }
-	is.size = len(is.data) // number of runes (len(data) on the string would count bytes, not runes, so it is not equivalent)
- return is
-}
-
-func (is *InputStream) reset() {
- is.index = 0
-}
-
-// Consume moves the input pointer to the next character in the input stream
-func (is *InputStream) Consume() {
- if is.index >= is.size {
- // assert is.LA(1) == TokenEOF
- panic("cannot consume EOF")
- }
- is.index++
-}
-
-// LA returns the character at the given offset relative to the current position in the input stream
-func (is *InputStream) LA(offset int) int {
-
- if offset == 0 {
- return 0 // nil
- }
- if offset < 0 {
- offset++ // e.g., translate LA(-1) to use offset=0
- }
- pos := is.index + offset - 1
-
- if pos < 0 || pos >= is.size { // invalid
- return TokenEOF
- }
-
- return int(is.data[pos])
-}
-
-// LT returns the character at the given offset relative to the current position in the input stream
-func (is *InputStream) LT(offset int) int {
- return is.LA(offset)
-}
-
-// Index returns the current offset into the input stream
-func (is *InputStream) Index() int {
- return is.index
-}
-
-// Size returns the total number of characters in the input stream
-func (is *InputStream) Size() int {
- return is.size
-}
-
-// Mark does nothing here as we have the entire buffer
-func (is *InputStream) Mark() int {
- return -1
-}
-
-// Release does nothing here as we have the entire buffer
-func (is *InputStream) Release(_ int) {
-}
-
-// Seek moves the input pointer to the provided index offset
-func (is *InputStream) Seek(index int) {
- if index <= is.index {
- is.index = index // just jump don't update stream state (line,...)
- return
- }
- // seek forward
- is.index = intMin(index, is.size)
-}
-
-// GetText returns the text from the input stream from the start to the stop index
-func (is *InputStream) GetText(start int, stop int) string {
- if stop >= is.size {
- stop = is.size - 1
- }
- if start >= is.size {
- return ""
- }
-
- return string(is.data[start : stop+1])
-}
-
-// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
-// character of the stop token
-func (is *InputStream) GetTextFromTokens(start, stop Token) string {
- if start != nil && stop != nil {
- return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
- }
-
- return ""
-}
-
-func (is *InputStream) GetTextFromInterval(i Interval) string {
- return is.GetText(i.Start, i.Stop)
-}
-
-func (*InputStream) GetSourceName() string {
- return ""
-}
-
-// String returns the entire input stream as a string
-func (is *InputStream) String() string {
- return string(is.data)
-}
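
For orientation, the InputStream deleted above is a simple rune-backed buffer. A minimal usage sketch, following the behavior of the code above (NewInputStream also exists in the older runtime restored by this patch):

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        is := antlr.NewInputStream("héllo")

        fmt.Println(is.Size())              // 5 runes, even though the string is 6 bytes
        fmt.Println(string(rune(is.LA(1)))) // "h" - LA(1) looks at the current rune
        is.Consume()                        // advance past "h"
        fmt.Println(string(rune(is.LA(1)))) // "é"
        fmt.Println(is.GetText(0, 4))       // "héllo" - start and stop are inclusive rune indexes
    }
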
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
deleted file mode 100644
index ceccd96d2..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
+++ /dev/null
@@ -1,685 +0,0 @@
-package antlr
-
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-import (
- "container/list"
- "runtime/debug"
- "sort"
- "sync"
-)
-
-// Collectable is an interface that a struct should implement if it is to be
-// usable as a key in these collections.
-type Collectable[T any] interface {
- Hash() int
- Equals(other Collectable[T]) bool
-}
-
-type Comparator[T any] interface {
- Hash1(o T) int
- Equals2(T, T) bool
-}
-
-type CollectionSource int
-type CollectionDescriptor struct {
- SybolicName string
- Description string
-}
-
-const (
- UnknownCollection CollectionSource = iota
- ATNConfigLookupCollection
- ATNStateCollection
- DFAStateCollection
- ATNConfigCollection
- PredictionContextCollection
- SemanticContextCollection
- ClosureBusyCollection
- PredictionVisitedCollection
- MergeCacheCollection
- PredictionContextCacheCollection
- AltSetCollection
- ReachSetCollection
-)
-
-var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
- UnknownCollection: {
- SybolicName: "UnknownCollection",
- Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
- },
- ATNConfigCollection: {
- SybolicName: "ATNConfigCollection",
- Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
- "For instance, it is used to store the results of the closure() operation in the ATN.",
- },
- ATNConfigLookupCollection: {
- SybolicName: "ATNConfigLookupCollection",
- Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
- "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
- },
- ATNStateCollection: {
- SybolicName: "ATNStateCollection",
- Description: "ATNState collection. This is used to store the states of the ATN.",
- },
- DFAStateCollection: {
- SybolicName: "DFAStateCollection",
- Description: "DFAState collection. This is used to store the states of the DFA.",
- },
- PredictionContextCollection: {
- SybolicName: "PredictionContextCollection",
- Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
- },
- SemanticContextCollection: {
- SybolicName: "SemanticContextCollection",
- Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
- },
- ClosureBusyCollection: {
- SybolicName: "ClosureBusyCollection",
-		Description: "ClosureBusy collection. This is used to check and prevent infinite recursion in right-recursive rules." +
- "It stores ATNConfigs that are currently being processed in the closure() operation.",
- },
- PredictionVisitedCollection: {
- SybolicName: "PredictionVisitedCollection",
- Description: "A map that records whether we have visited a particular context when searching through cached entries.",
- },
- MergeCacheCollection: {
- SybolicName: "MergeCacheCollection",
- Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
- },
- PredictionContextCacheCollection: {
- SybolicName: "PredictionContextCacheCollection",
- Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
- },
- AltSetCollection: {
- SybolicName: "AltSetCollection",
- Description: "Used to eliminate duplicate alternatives in an ATN config set.",
- },
- ReachSetCollection: {
- SybolicName: "ReachSetCollection",
- Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
- },
-}
-
-// JStore implements a container that allows the use of a struct to calculate the key
-// for a collection of values akin to a map. This is not meant to be a full-blown HashMap but just
-// to serve the needs of the ANTLR Go runtime.
-//
-// For ease of porting the logic of the runtime from the master target (Java), this collection
-// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
-// function as the key. The values are stored in a standard Go map, which internally is a form of hashmap
-// itself; the key for the Go map is the hash supplied by the key object. The collection is able to deal with
-// hash conflicts by using a simple slice of values associated with the hash-code-indexed bucket. That isn't
-// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime and
-// we understand the requirements, this is fine - this is not a general-purpose collection.
-type JStore[T any, C Comparator[T]] struct {
- store map[int][]T
- len int
- comparator Comparator[T]
- stats *JStatRec
-}
-
-func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
-
- if comparator == nil {
- panic("comparator cannot be nil")
- }
-
- s := &JStore[T, C]{
- store: make(map[int][]T, 1),
- comparator: comparator,
- }
- if collectStats {
- s.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
-
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- s.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(s.stats)
- }
- return s
-}
-
-// Put will store the given value in the collection. Note that the key for storage is generated from
-// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
-// as any kind of general collection.
-//
-// If the key has a hash conflict, then the value will be added to the slice of values associated with the
-// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
-// tested by calling the equals() method on the key.
-//
-// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
-//
-// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
-func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
-
- if collectStats {
- s.stats.Puts++
- }
- kh := s.comparator.Hash1(value)
-
- var hClash bool
- for _, v1 := range s.store[kh] {
- hClash = true
- if s.comparator.Equals2(value, v1) {
- if collectStats {
- s.stats.PutHits++
- s.stats.PutHashConflicts++
- }
- return v1, true
- }
- if collectStats {
- s.stats.PutMisses++
- }
- }
- if collectStats && hClash {
- s.stats.PutHashConflicts++
- }
- s.store[kh] = append(s.store[kh], value)
-
- if collectStats {
- if len(s.store[kh]) > s.stats.MaxSlotSize {
- s.stats.MaxSlotSize = len(s.store[kh])
- }
- }
- s.len++
- if collectStats {
- s.stats.CurSize = s.len
- if s.len > s.stats.MaxSize {
- s.stats.MaxSize = s.len
- }
- }
- return value, false
-}
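
A sketch of the Put semantics documented above. The comparator type is made up purely for illustration, and the constructor signature matches the antlr4-go runtime being deleted here (the older runtime's NewJStore takes fewer arguments):

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    // intCmp is a hypothetical Comparator for plain ints, used only for this example.
    type intCmp struct{}

    func (intCmp) Hash1(o int) int       { return o % 8 } // deliberately weak hash to exercise bucket collisions
    func (intCmp) Equals2(a, b int) bool { return a == b }

    func main() {
        s := antlr.NewJStore[int, intCmp](intCmp{}, antlr.UnknownCollection, "example store")

        v, exists := s.Put(42) // first insert: the value itself is returned, exists == false
        fmt.Println(v, exists) // 42 false

        v, exists = s.Put(42)  // duplicate: the existing value is returned, exists == true
        fmt.Println(v, exists) // 42 true

        fmt.Println(s.Contains(50), s.Len()) // false 1 (50 lands in the same bucket but is not equal)
    }
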
-
-// Get will return the value associated with the key - the type of the key is the same type as the value
-// which would not generally be useful, but this is a specific thing for ANTLR where the key is
-// generated using the object we are going to store.
-func (s *JStore[T, C]) Get(key T) (T, bool) {
- if collectStats {
- s.stats.Gets++
- }
- kh := s.comparator.Hash1(key)
- var hClash bool
- for _, v := range s.store[kh] {
- hClash = true
- if s.comparator.Equals2(key, v) {
- if collectStats {
- s.stats.GetHits++
- s.stats.GetHashConflicts++
- }
- return v, true
- }
- if collectStats {
- s.stats.GetMisses++
- }
- }
- if collectStats {
- if hClash {
- s.stats.GetHashConflicts++
- }
- s.stats.GetNoEnt++
- }
- return key, false
-}
-
-// Contains returns true if the given key is present in the store
-func (s *JStore[T, C]) Contains(key T) bool {
- _, present := s.Get(key)
- return present
-}
-
-func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
- vs := make([]T, 0, len(s.store))
- for _, v := range s.store {
- vs = append(vs, v...)
- }
- sort.Slice(vs, func(i, j int) bool {
- return less(vs[i], vs[j])
- })
-
- return vs
-}
-
-func (s *JStore[T, C]) Each(f func(T) bool) {
- for _, e := range s.store {
- for _, v := range e {
- f(v)
- }
- }
-}
-
-func (s *JStore[T, C]) Len() int {
- return s.len
-}
-
-func (s *JStore[T, C]) Values() []T {
- vs := make([]T, 0, len(s.store))
- for _, e := range s.store {
- vs = append(vs, e...)
- }
- return vs
-}
-
-type entry[K, V any] struct {
- key K
- val V
-}
-
-type JMap[K, V any, C Comparator[K]] struct {
- store map[int][]*entry[K, V]
- len int
- comparator Comparator[K]
- stats *JStatRec
-}
-
-func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
- m := &JMap[K, V, C]{
- store: make(map[int][]*entry[K, V], 1),
- comparator: comparator,
- }
- if collectStats {
- m.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- m.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(m.stats)
- }
- return m
-}
-
-func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
- if collectStats {
- m.stats.Puts++
- }
- kh := m.comparator.Hash1(key)
-
- var hClash bool
- for _, e := range m.store[kh] {
- hClash = true
- if m.comparator.Equals2(e.key, key) {
- if collectStats {
- m.stats.PutHits++
- m.stats.PutHashConflicts++
- }
- return e.val, true
- }
- if collectStats {
- m.stats.PutMisses++
- }
- }
- if collectStats {
- if hClash {
- m.stats.PutHashConflicts++
- }
- }
- m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
- if collectStats {
- if len(m.store[kh]) > m.stats.MaxSlotSize {
- m.stats.MaxSlotSize = len(m.store[kh])
- }
- }
- m.len++
- if collectStats {
- m.stats.CurSize = m.len
- if m.len > m.stats.MaxSize {
- m.stats.MaxSize = m.len
- }
- }
- return val, false
-}
-
-func (m *JMap[K, V, C]) Values() []V {
- vs := make([]V, 0, len(m.store))
- for _, e := range m.store {
- for _, v := range e {
- vs = append(vs, v.val)
- }
- }
- return vs
-}
-
-func (m *JMap[K, V, C]) Get(key K) (V, bool) {
- if collectStats {
- m.stats.Gets++
- }
- var none V
- kh := m.comparator.Hash1(key)
- var hClash bool
- for _, e := range m.store[kh] {
- hClash = true
- if m.comparator.Equals2(e.key, key) {
- if collectStats {
- m.stats.GetHits++
- m.stats.GetHashConflicts++
- }
- return e.val, true
- }
- if collectStats {
- m.stats.GetMisses++
- }
- }
- if collectStats {
- if hClash {
- m.stats.GetHashConflicts++
- }
- m.stats.GetNoEnt++
- }
- return none, false
-}
-
-func (m *JMap[K, V, C]) Len() int {
- return m.len
-}
-
-func (m *JMap[K, V, C]) Delete(key K) {
- kh := m.comparator.Hash1(key)
- for i, e := range m.store[kh] {
- if m.comparator.Equals2(e.key, key) {
- m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
- m.len--
- return
- }
- }
-}
-
-func (m *JMap[K, V, C]) Clear() {
- m.store = make(map[int][]*entry[K, V])
-}
-
-type JPCMap struct {
- store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
- size int
- stats *JStatRec
-}
-
-func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
- m := &JPCMap{
- store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
- }
- if collectStats {
- m.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- m.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(m.stats)
- }
- return m
-}
-
-func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- pcm.stats.Gets++
- }
- // Do we have a map stored by k1?
- //
- m2, present := pcm.store.Get(k1)
- if present {
- if collectStats {
- pcm.stats.GetHits++
- }
- // We found a map of values corresponding to k1, so now we need to look up k2 in that map
- //
- return m2.Get(k2)
- }
- if collectStats {
- pcm.stats.GetMisses++
- }
- return nil, false
-}
-
-func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
-
- if collectStats {
- pcm.stats.Puts++
- }
- // First does a map already exist for k1?
- //
- if m2, present := pcm.store.Get(k1); present {
- if collectStats {
- pcm.stats.PutHits++
- }
- _, present = m2.Put(k2, v)
- if !present {
- pcm.size++
- if collectStats {
- pcm.stats.CurSize = pcm.size
- if pcm.size > pcm.stats.MaxSize {
- pcm.stats.MaxSize = pcm.size
- }
- }
- }
- } else {
-		// No map found for k1, so we create it, add in our value, then store it
- //
- if collectStats {
- pcm.stats.PutMisses++
- m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
- } else {
- m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
- }
-
- m2.Put(k2, v)
- pcm.store.Put(k1, m2)
- pcm.size++
- }
-}
-
-type JPCMap2 struct {
- store map[int][]JPCEntry
- size int
- stats *JStatRec
-}
-
-type JPCEntry struct {
- k1, k2, v *PredictionContext
-}
-
-func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
- m := &JPCMap2{
- store: make(map[int][]JPCEntry, 1000),
- }
- if collectStats {
- m.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- m.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(m.stats)
- }
- return m
-}
-
-func dHash(k1, k2 *PredictionContext) int {
- return k1.cachedHash*31 + k2.cachedHash
-}
-
-func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- pcm.stats.Gets++
- }
-
- h := dHash(k1, k2)
- var hClash bool
- for _, e := range pcm.store[h] {
- hClash = true
- if e.k1.Equals(k1) && e.k2.Equals(k2) {
- if collectStats {
- pcm.stats.GetHits++
- pcm.stats.GetHashConflicts++
- }
- return e.v, true
- }
- if collectStats {
- pcm.stats.GetMisses++
- }
- }
- if collectStats {
- if hClash {
- pcm.stats.GetHashConflicts++
- }
- pcm.stats.GetNoEnt++
- }
- return nil, false
-}
-
-func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- pcm.stats.Puts++
- }
- h := dHash(k1, k2)
- var hClash bool
- for _, e := range pcm.store[h] {
- hClash = true
- if e.k1.Equals(k1) && e.k2.Equals(k2) {
- if collectStats {
- pcm.stats.PutHits++
- pcm.stats.PutHashConflicts++
- }
- return e.v, true
- }
- if collectStats {
- pcm.stats.PutMisses++
- }
- }
- if collectStats {
- if hClash {
- pcm.stats.PutHashConflicts++
- }
- }
- pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
- pcm.size++
- if collectStats {
- pcm.stats.CurSize = pcm.size
- if pcm.size > pcm.stats.MaxSize {
- pcm.stats.MaxSize = pcm.size
- }
- }
- return nil, false
-}
-
-type VisitEntry struct {
- k *PredictionContext
- v *PredictionContext
-}
-type VisitRecord struct {
- store map[*PredictionContext]*PredictionContext
- len int
- stats *JStatRec
-}
-
-type VisitList struct {
- cache *list.List
- lock sync.RWMutex
-}
-
-var visitListPool = VisitList{
- cache: list.New(),
- lock: sync.RWMutex{},
-}
-
-// NewVisitRecord returns a new VisitRecord instance from the pool if available.
-// Note that this "map" uses a pointer as a key because we are emulating the behavior of
-// IdentityHashMap in Java, which uses the `==` operator to compare keys - that is, it asks
-// whether the key is the same object reference rather than whether it is .equals() to another
-// object.
-func NewVisitRecord() *VisitRecord {
- visitListPool.lock.Lock()
- el := visitListPool.cache.Front()
- defer visitListPool.lock.Unlock()
- var vr *VisitRecord
- if el == nil {
- vr = &VisitRecord{
- store: make(map[*PredictionContext]*PredictionContext),
- }
- if collectStats {
- vr.stats = &JStatRec{
- Source: PredictionContextCacheCollection,
- Description: "VisitRecord",
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- vr.stats.CreateStack = debug.Stack()
- }
- }
- } else {
- vr = el.Value.(*VisitRecord)
- visitListPool.cache.Remove(el)
- vr.store = make(map[*PredictionContext]*PredictionContext)
- }
- if collectStats {
- Statistics.AddJStatRec(vr.stats)
- }
- return vr
-}
-
-func (vr *VisitRecord) Release() {
- vr.len = 0
- vr.store = nil
- if collectStats {
- vr.stats.MaxSize = 0
- vr.stats.CurSize = 0
- vr.stats.Gets = 0
- vr.stats.GetHits = 0
- vr.stats.GetMisses = 0
- vr.stats.GetHashConflicts = 0
- vr.stats.GetNoEnt = 0
- vr.stats.Puts = 0
- vr.stats.PutHits = 0
- vr.stats.PutMisses = 0
- vr.stats.PutHashConflicts = 0
- vr.stats.MaxSlotSize = 0
- }
- visitListPool.lock.Lock()
- visitListPool.cache.PushBack(vr)
- visitListPool.lock.Unlock()
-}
-
-func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- vr.stats.Gets++
- }
- v := vr.store[k]
- if v != nil {
- if collectStats {
- vr.stats.GetHits++
- }
- return v, true
- }
- if collectStats {
- vr.stats.GetNoEnt++
- }
- return nil, false
-}
-
-func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- vr.stats.Puts++
- }
- vr.store[k] = v
- vr.len++
- if collectStats {
- vr.stats.CurSize = vr.len
- if vr.len > vr.stats.MaxSize {
- vr.stats.MaxSize = vr.len
- }
- }
- return v, false
-}
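
The VisitRecord pool above is a simple free list guarded by a mutex: Release parks a record on a list.List and NewVisitRecord hands it back out before allocating a new one. A standalone sketch of the same pattern, with hypothetical names:

    package main

    import (
        "container/list"
        "sync"
    )

    // bufPool mirrors the visitListPool idea: a mutex-guarded free list of reusable maps.
    type bufPool struct {
        mu    sync.Mutex
        cache *list.List
    }

    func (p *bufPool) get() map[string]int {
        p.mu.Lock()
        defer p.mu.Unlock()
        if el := p.cache.Front(); el != nil {
            p.cache.Remove(el)
            return el.Value.(map[string]int)
        }
        return make(map[string]int)
    }

    func (p *bufPool) put(m map[string]int) {
        for k := range m { // reset before parking, analogous to VisitRecord.Release
            delete(m, k)
        }
        p.mu.Lock()
        p.cache.PushBack(m)
        p.mu.Unlock()
    }

    func main() {
        pool := &bufPool{cache: list.New()}
        m := pool.get()
        m["visits"] = 1
        pool.put(m) // the same map will be handed back by the next get()
    }
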
diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
deleted file mode 100644
index 923c7b52c..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
+++ /dev/null
@@ -1,47 +0,0 @@
-//go:build !antlr.stats
-
-package antlr
-
-// This file is compiled when the build configuration antlr.stats is not enabled,
-// which then allows the compiler to optimize out all the code that is not used.
-const collectStats = false
-
-// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
-type goRunStats struct {
-}
-
-var Statistics = &goRunStats{}
-
-func (s *goRunStats) AddJStatRec(_ *JStatRec) {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-func (s *goRunStats) CollectionAnomalies() {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-func (s *goRunStats) Reset() {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-func (s *goRunStats) Report(dir string, prefix string) error {
- // Do nothing - compiler will optimize this out (hopefully)
- return nil
-}
-
-func (s *goRunStats) Analyze() {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-type statsOption func(*goRunStats) error
-
-func (s *goRunStats) Configure(options ...statsOption) error {
- // Do nothing - compiler will optimize this out (hopefully)
- return nil
-}
-
-func WithTopN(topN int) statsOption {
- return func(s *goRunStats) error {
- return nil
- }
-}
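
nostatistics.go relies on Go build constraints: this file is compiled when the antlr.stats tag is absent and supplies no-op stubs, while a counterpart file compiled only with the tag supplies the real collectors. A generic sketch of that pattern, using a hypothetical tag name:

    // metrics_off.go - compiled by default
    //go:build !myapp.metrics

    package metrics

    const enabled = false

    func Record(string, int) {} // no-op; the compiler can optimize the calls away

    // metrics_on.go - compiled with: go build -tags myapp.metrics
    //go:build myapp.metrics

    package metrics

    import "log"

    const enabled = true

    func Record(name string, v int) { log.Printf("%s=%d", name, v) }
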
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
deleted file mode 100644
index c1b80cc1f..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
+++ /dev/null
@@ -1,727 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "golang.org/x/exp/slices"
- "strconv"
-)
-
-var _emptyPredictionContextHash int
-
-func init() {
- _emptyPredictionContextHash = murmurInit(1)
- _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
-}
-
-func calculateEmptyHash() int {
- return _emptyPredictionContextHash
-}
-
-const (
- // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
- // doesn't mean wildcard:
- //
- // $ + x = [$,x]
- //
- // Here,
- //
- // $ = EmptyReturnState
- BasePredictionContextEmptyReturnState = 0x7FFFFFFF
-)
-
-// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
-//
-//goland:noinspection GoUnusedGlobalVariable
-var (
- BasePredictionContextglobalNodeCount = 1
- BasePredictionContextid = BasePredictionContextglobalNodeCount
-)
-
-const (
- PredictionContextEmpty = iota
- PredictionContextSingleton
- PredictionContextArray
-)
-
-// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
-// emulate inheritance from Java, and can be used without an interface definition. An interface
-// is not required because no user code will ever need to implement this interface.
-type PredictionContext struct {
- cachedHash int
- pcType int
- parentCtx *PredictionContext
- returnState int
- parents []*PredictionContext
- returnStates []int
-}
-
-func NewEmptyPredictionContext() *PredictionContext {
- nep := &PredictionContext{}
- nep.cachedHash = calculateEmptyHash()
- nep.pcType = PredictionContextEmpty
- nep.returnState = BasePredictionContextEmptyReturnState
- return nep
-}
-
-func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
- pc := &PredictionContext{}
- pc.pcType = PredictionContextSingleton
- pc.returnState = returnState
- pc.parentCtx = parent
- if parent != nil {
- pc.cachedHash = calculateHash(parent, returnState)
- } else {
- pc.cachedHash = calculateEmptyHash()
- }
- return pc
-}
-
-func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
- if returnState == BasePredictionContextEmptyReturnState && parent == nil {
- // someone can pass in the bits of an array ctx that mean $
- return BasePredictionContextEMPTY
- }
- return NewBaseSingletonPredictionContext(parent, returnState)
-}
-
-func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
- // Parent can be nil only if full ctx mode and we make an array
- // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
- // nil parent and
- // returnState == {@link //EmptyReturnState}.
- hash := murmurInit(1)
- for _, parent := range parents {
- hash = murmurUpdate(hash, parent.Hash())
- }
- for _, returnState := range returnStates {
- hash = murmurUpdate(hash, returnState)
- }
- hash = murmurFinish(hash, len(parents)<<1)
-
- nec := &PredictionContext{}
- nec.cachedHash = hash
- nec.pcType = PredictionContextArray
- nec.parents = parents
- nec.returnStates = returnStates
- return nec
-}
-
-func (p *PredictionContext) Hash() int {
- return p.cachedHash
-}
-
-func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
- switch p.pcType {
- case PredictionContextEmpty:
- otherP := other.(*PredictionContext)
- return other == nil || otherP == nil || otherP.isEmpty()
- case PredictionContextSingleton:
- return p.SingletonEquals(other)
- case PredictionContextArray:
- return p.ArrayEquals(other)
- }
- return false
-}
-
-func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
- if o == nil {
- return false
- }
- other := o.(*PredictionContext)
- if other == nil || other.pcType != PredictionContextArray {
- return false
- }
- if p.cachedHash != other.Hash() {
- return false // can't be same if hash is different
- }
-
- // Must compare the actual array elements and not just the array address
- //
- return slices.Equal(p.returnStates, other.returnStates) &&
- slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
- return x.Equals(y)
- })
-}
-
-func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
- if other == nil {
- return false
- }
- otherP := other.(*PredictionContext)
- if otherP == nil {
- return false
- }
-
- if p.cachedHash != otherP.Hash() {
- return false // Can't be same if hash is different
- }
-
- if p.returnState != otherP.getReturnState(0) {
- return false
- }
-
- // Both parents must be nil if one is
- if p.parentCtx == nil {
- return otherP.parentCtx == nil
- }
-
- return p.parentCtx.Equals(otherP.parentCtx)
-}
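
For orientation, equality and hashing of these prediction contexts behave as in this minimal sketch, built with the exported constructors of the runtime deleted here:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    func main() {
        root := antlr.NewEmptyPredictionContext()

        a := antlr.NewBaseSingletonPredictionContext(root, 7)
        b := antlr.NewBaseSingletonPredictionContext(root, 7)
        c := antlr.NewBaseSingletonPredictionContext(root, 9)

        fmt.Println(a.Equals(b), a.Hash() == b.Hash()) // true true  - same parent and return state
        fmt.Println(a.Equals(c))                       // false      - different return state
    }
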
-
-func (p *PredictionContext) GetParent(i int) *PredictionContext {
- switch p.pcType {
- case PredictionContextEmpty:
- return nil
- case PredictionContextSingleton:
- return p.parentCtx
- case PredictionContextArray:
- return p.parents[i]
- }
- return nil
-}
-
-func (p *PredictionContext) getReturnState(i int) int {
- switch p.pcType {
- case PredictionContextArray:
- return p.returnStates[i]
- default:
- return p.returnState
- }
-}
-
-func (p *PredictionContext) GetReturnStates() []int {
- switch p.pcType {
- case PredictionContextArray:
- return p.returnStates
- default:
- return []int{p.returnState}
- }
-}
-
-func (p *PredictionContext) length() int {
- switch p.pcType {
- case PredictionContextArray:
- return len(p.returnStates)
- default:
- return 1
- }
-}
-
-func (p *PredictionContext) hasEmptyPath() bool {
- switch p.pcType {
- case PredictionContextSingleton:
- return p.returnState == BasePredictionContextEmptyReturnState
- }
- return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
-}
-
-func (p *PredictionContext) String() string {
- switch p.pcType {
- case PredictionContextEmpty:
- return "$"
- case PredictionContextSingleton:
- var up string
-
- if p.parentCtx == nil {
- up = ""
- } else {
- up = p.parentCtx.String()
- }
-
- if len(up) == 0 {
- if p.returnState == BasePredictionContextEmptyReturnState {
- return "$"
- }
-
- return strconv.Itoa(p.returnState)
- }
-
- return strconv.Itoa(p.returnState) + " " + up
- case PredictionContextArray:
- if p.isEmpty() {
- return "[]"
- }
-
- s := "["
- for i := 0; i < len(p.returnStates); i++ {
- if i > 0 {
- s = s + ", "
- }
- if p.returnStates[i] == BasePredictionContextEmptyReturnState {
- s = s + "$"
- continue
- }
- s = s + strconv.Itoa(p.returnStates[i])
- if !p.parents[i].isEmpty() {
- s = s + " " + p.parents[i].String()
- } else {
- s = s + "nil"
- }
- }
- return s + "]"
-
- default:
- return "unknown"
- }
-}
-
-func (p *PredictionContext) isEmpty() bool {
- switch p.pcType {
- case PredictionContextEmpty:
- return true
- case PredictionContextArray:
- // since EmptyReturnState can only appear in the last position, we
- // don't need to verify that size==1
- return p.returnStates[0] == BasePredictionContextEmptyReturnState
- default:
- return false
- }
-}
-
-func (p *PredictionContext) Type() int {
- return p.pcType
-}
-
-func calculateHash(parent *PredictionContext, returnState int) int {
- h := murmurInit(1)
- h = murmurUpdate(h, parent.Hash())
- h = murmurUpdate(h, returnState)
- return murmurFinish(h, 2)
-}
-
-// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
-// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
-// /
-func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
- if outerContext == nil {
- outerContext = ParserRuleContextEmpty
- }
- // if we are in RuleContext of start rule, s, then BasePredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
- return BasePredictionContextEMPTY
- }
- // If we have a parent, convert it to a BasePredictionContext graph
- parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
- state := a.states[outerContext.GetInvokingState()]
- transition := state.GetTransitions()[0]
-
- return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
-}
-
-func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
-
- // Share same graph if both same
- //
- if a == b || a.Equals(b) {
- return a
- }
-
- if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
- return mergeSingletons(a, b, rootIsWildcard, mergeCache)
- }
- // At least one of a or b is array
- // If one is $ and rootIsWildcard, return $ as wildcard
- if rootIsWildcard {
- if a.isEmpty() {
- return a
- }
- if b.isEmpty() {
- return b
- }
- }
-
- // Convert either Singleton or Empty to arrays, so that we can merge them
- //
- ara := convertToArray(a)
- arb := convertToArray(b)
- return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
-}
-
-func convertToArray(pc *PredictionContext) *PredictionContext {
- switch pc.Type() {
- case PredictionContextEmpty:
- return NewArrayPredictionContext([]*PredictionContext{}, []int{})
- case PredictionContextSingleton:
- return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
- default:
- // Already an array
- }
- return pc
-}
-
-// mergeSingletons merges two Singleton [PredictionContext] instances.
-//
-// Stack tops equal and parents merge to the same graph: return the left graph.
-//
-//
-// Same stack top but parents differ: merge the parents, giving an array node, then
-// the remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
-//
-// Different stack tops pointing to the same parent: make an array node for the
-// root where both elements in the root point to the same (original)
-// parent.
-//
-//
-// Different stack tops pointing to different parents: make an array node for
-// the root where each element points to the corresponding original
-// parent.
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// @param mergeCache
-// /
-func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
- if mergeCache != nil {
- previous, present := mergeCache.Get(a, b)
- if present {
- return previous
- }
- previous, present = mergeCache.Get(b, a)
- if present {
- return previous
- }
- }
-
- rootMerge := mergeRoot(a, b, rootIsWildcard)
- if rootMerge != nil {
- if mergeCache != nil {
- mergeCache.Put(a, b, rootMerge)
- }
- return rootMerge
- }
- if a.returnState == b.returnState {
- parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
- // if parent is same as existing a or b parent or reduced to a parent,
- // return it
- if parent.Equals(a.parentCtx) {
- return a // ax + bx = ax, if a=b
- }
- if parent.Equals(b.parentCtx) {
- return b // ax + bx = bx, if a=b
- }
- // else: ax + ay = a'[x,y]
- // merge parents x and y, giving array node with x,y then remainders
- // of those graphs. dup a, a' points at merged array.
- // New joined parent so create a new singleton pointing to it, a'
- spc := SingletonBasePredictionContextCreate(parent, a.returnState)
- if mergeCache != nil {
- mergeCache.Put(a, b, spc)
- }
- return spc
- }
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- var singleParent *PredictionContext
- if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
- // bx =
- // [a,b]x
- singleParent = a.parentCtx
- }
- if singleParent != nil { // parents are same
- // sort payloads and use same parent
- payloads := []int{a.returnState, b.returnState}
- if a.returnState > b.returnState {
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- }
- parents := []*PredictionContext{singleParent, singleParent}
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.Put(a, b, apc)
- }
- return apc
- }
- // parents differ and can't merge them. Just pack together
- // into array can't merge.
- // ax + by = [ax,by]
- payloads := []int{a.returnState, b.returnState}
- parents := []*PredictionContext{a.parentCtx, b.parentCtx}
- if a.returnState > b.returnState { // sort by payload
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- parents = []*PredictionContext{b.parentCtx, a.parentCtx}
- }
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.Put(a, b, apc)
- }
- return apc
-}
-
-// Handle case where at least one of {@code a} or {@code b} is
-// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
-// to represent {@link //EMPTY}.
-//
-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is a superset of any graph: return {@link //EMPTY}.
-//
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so the merged parent is
-// {@code //EMPTY}: return the left graph.
-//
-//
-// Special case of last merge if local context.
-//
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-//
-//
-// Must keep all contexts; {@link //EMPTY} in the array is a special value (and
-// has a nil parent).
-//
-//
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// /
-func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
- if rootIsWildcard {
- if a.pcType == PredictionContextEmpty {
- return BasePredictionContextEMPTY // // + b =//
- }
- if b.pcType == PredictionContextEmpty {
- return BasePredictionContextEMPTY // a +// =//
- }
- } else {
- if a.isEmpty() && b.isEmpty() {
- return BasePredictionContextEMPTY // $ + $ = $
- } else if a.isEmpty() { // $ + x = [$,x]
- payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []*PredictionContext{b.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
- payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []*PredictionContext{a.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- }
- }
- return nil
-}
-
-// Merge two {@link ArrayBasePredictionContext} instances.
-//
-// Different tops, different parents.
-//
-//
-// Shared top, same parents.
-//
-//
-// Shared top, different parents.
-//
-//
-// Shared top, all shared parents.
-//
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
-//
-//goland:noinspection GoBoolExpressions
-func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
- if mergeCache != nil {
- previous, present := mergeCache.Get(a, b)
- if present {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous
- }
- previous, present = mergeCache.Get(b, a)
- if present {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous
- }
- }
- // merge sorted payloads a + b => M
- i := 0 // walks a
- j := 0 // walks b
- k := 0 // walks target M array
-
- mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
- mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
- // walk and merge to yield mergedParents, mergedReturnStates
- for i < len(a.returnStates) && j < len(b.returnStates) {
- aParent := a.parents[i]
- bParent := b.parents[j]
- if a.returnStates[i] == b.returnStates[j] {
- // same payload (stack tops are equal), must yield merged singleton
- payload := a.returnStates[i]
- // $+$ = $
- bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
- axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax
- // ->
- // ax
- if bothDollars || axAX {
- mergedParents[k] = aParent // choose left
- mergedReturnStates[k] = payload
- } else { // ax+ay -> a'[x,y]
- mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
- mergedParents[k] = mergedParent
- mergedReturnStates[k] = payload
- }
- i++ // hop over left one as usual
- j++ // but also Skip one in right side since we merge
- } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
- mergedParents[k] = aParent
- mergedReturnStates[k] = a.returnStates[i]
- i++
- } else { // b > a, copy b[j] to M
- mergedParents[k] = bParent
- mergedReturnStates[k] = b.returnStates[j]
- j++
- }
- k++
- }
- // copy over any payloads remaining in either array
- if i < len(a.returnStates) {
- for p := i; p < len(a.returnStates); p++ {
- mergedParents[k] = a.parents[p]
- mergedReturnStates[k] = a.returnStates[p]
- k++
- }
- } else {
- for p := j; p < len(b.returnStates); p++ {
- mergedParents[k] = b.parents[p]
- mergedReturnStates[k] = b.returnStates[p]
- k++
- }
- }
- // trim merged if we combined a few that had same stack tops
- if k < len(mergedParents) { // write index < last position trim
- if k == 1 { // for just one merged element, return singleton top
- pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
- if mergeCache != nil {
- mergeCache.Put(a, b, pc)
- }
- return pc
- }
- mergedParents = mergedParents[0:k]
- mergedReturnStates = mergedReturnStates[0:k]
- }
-
- M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
-
- // if we created same array as a or b, return that instead
- // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
- if M.Equals(a) {
- if mergeCache != nil {
- mergeCache.Put(a, b, a)
- }
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
- }
- return a
- }
- if M.Equals(b) {
- if mergeCache != nil {
- mergeCache.Put(a, b, b)
- }
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
- }
- return b
- }
- combineCommonParents(&mergedParents)
-
- if mergeCache != nil {
- mergeCache.Put(a, b, M)
- }
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
- }
- return M
-}
-
-// Make a pass over all M parents and merge any Equals() ones.
-// Note that we pass a pointer to the slice as we want to modify it in place.
-//
-//goland:noinspection GoUnusedFunction
-func combineCommonParents(parents *[]*PredictionContext) {
- uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
-
- for p := 0; p < len(*parents); p++ {
- parent := (*parents)[p]
- _, _ = uniqueParents.Put(parent)
- }
- for q := 0; q < len(*parents); q++ {
- pc, _ := uniqueParents.Get((*parents)[q])
- (*parents)[q] = pc
- }
-}
-
-func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
- if context.isEmpty() {
- return context
- }
- existing, present := visited.Get(context)
- if present {
- return existing
- }
-
- existing, present = contextCache.Get(context)
- if present {
- visited.Put(context, existing)
- return existing
- }
- changed := false
- parents := make([]*PredictionContext, context.length())
- for i := 0; i < len(parents); i++ {
- parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
- if changed || !parent.Equals(context.GetParent(i)) {
- if !changed {
- parents = make([]*PredictionContext, context.length())
- for j := 0; j < context.length(); j++ {
- parents[j] = context.GetParent(j)
- }
- changed = true
- }
- parents[i] = parent
- }
- }
- if !changed {
- contextCache.add(context)
- visited.Put(context, context)
- return context
- }
- var updated *PredictionContext
- if len(parents) == 0 {
- updated = BasePredictionContextEMPTY
- } else if len(parents) == 1 {
- updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
- } else {
- updated = NewArrayPredictionContext(parents, context.GetReturnStates())
- }
- contextCache.add(updated)
- visited.Put(updated, updated)
- visited.Put(context, updated)
-
- return updated
-}
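For illustration only, here is a minimal, self-contained Go sketch of the merge step that the comments in mergeArrays above describe ("merge sorted payloads a + b => M"): two return-state arrays, each sorted ascending, are walked with two cursors and combined into one sorted result, collapsing equal stack tops. Parent merging, the cache, and the preallocate-then-trim detail are deliberately omitted; the function name is hypothetical and this is not the runtime's API.

package main

import "fmt"

// mergeSortedReturnStates combines two ascending return-state arrays into one,
// collapsing entries whose return states are equal (the "same stack top" case).
func mergeSortedReturnStates(a, b []int) []int {
    merged := make([]int, 0, len(a)+len(b))
    i, j := 0, 0
    for i < len(a) && j < len(b) {
        switch {
        case a[i] == b[j]:
            // Equal stack tops yield a single merged entry.
            merged = append(merged, a[i])
            i++
            j++
        case a[i] < b[j]:
            merged = append(merged, a[i])
            i++
        default:
            merged = append(merged, b[j])
            j++
        }
    }
    // Copy over any payloads remaining in either array.
    merged = append(merged, a[i:]...)
    merged = append(merged, b[j:]...)
    return merged
}

func main() {
    fmt.Println(mergeSortedReturnStates([]int{3, 7, 9}, []int{3, 5, 9})) // [3 5 7 9]
}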
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
deleted file mode 100644
index 25dfb11e8..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package antlr
-
-var BasePredictionContextEMPTY = &PredictionContext{
- cachedHash: calculateEmptyHash(),
- pcType: PredictionContextEmpty,
- returnState: BasePredictionContextEmptyReturnState,
-}
-
-// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
-// context cache associated with contexts in DFA states. This cache
-// can be used for both lexers and parsers.
-type PredictionContextCache struct {
- cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
-}
-
-func NewPredictionContextCache() *PredictionContextCache {
- return &PredictionContextCache{
- cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
- }
-}
-
-// Add a context to the cache and return it. If the context already exists,
-// return that one instead and do not add a new context to the cache.
-// Protect shared cache from unsafe thread access.
-func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
- if ctx.isEmpty() {
- return BasePredictionContextEMPTY
- }
-
- // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
- // the same pointer), otherwise it will add the new entry and return that.
- //
- existing, present := p.cache.Get(ctx)
- if present {
- return existing
- }
- p.cache.Put(ctx, ctx)
- return ctx
-}
-
-func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
- pc, exists := p.cache.Get(ctx)
- return pc, exists
-}
-
-func (p *PredictionContextCache) length() int {
- return p.cache.Len()
-}
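The add method above returns the previously stored, canonical entry on a cache hit so that callers share one instance, and stores the new entry otherwise. Below is a standalone sketch of that interning pattern, using a plain Go map keyed by a caller-computed hash instead of the runtime's JMap; the type and names are hypothetical, and structural equality and collision handling are glossed over.

package main

import "fmt"

type internCache[V any] struct {
    entries map[uint64]V
}

func newInternCache[V any]() *internCache[V] {
    return &internCache[V]{entries: make(map[uint64]V)}
}

// add returns the canonical value already stored under key, or stores v and
// returns it when no entry exists yet.
func (c *internCache[V]) add(key uint64, v V) V {
    if existing, ok := c.entries[key]; ok {
        return existing
    }
    c.entries[key] = v
    return v
}

func main() {
    c := newInternCache[string]()
    fmt.Println(c.add(42, "first"))  // first
    fmt.Println(c.add(42, "second")) // first: the canonical entry is reused
}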
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
deleted file mode 100644
index 3f85a6a52..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
+++ /dev/null
@@ -1,536 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// This enumeration defines the prediction modes available in ANTLR 4 along with
-// utility methods for analyzing configuration sets for conflicts and/or
-// ambiguities.
-
-const (
- // PredictionModeSLL represents the SLL(*) prediction mode.
- // This prediction mode ignores the current
- // parser context when making predictions. This is the fastest prediction
- // mode, and provides correct results for many grammars. This prediction
- // mode is more powerful than the prediction mode provided by ANTLR 3, but
- // may result in syntax errors for grammar and input combinations which are
- // not SLL.
- //
- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the SLL prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful LL prediction abilities to complete successfully.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeSLL = 0
-
- // PredictionModeLL represents the LL(*) prediction mode.
- // This prediction mode allows the current parser
- // context to be used for resolving SLL conflicts that occur during
- // prediction. This is the fastest prediction mode that guarantees correct
- // parse results for all combinations of grammars with syntactically correct
- // inputs.
- //
- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLL = 1
-
- // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
- // with exact ambiguity detection.
- //
- // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
- // this prediction mode instructs the prediction algorithm to determine the
- // complete and exact set of ambiguous alternatives for every ambiguous
- // decision encountered while parsing.
- //
- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLLExactAmbigDetection = 2
-)
-
-// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
-//
-// This method computes the SLL prediction termination condition for both of
-// the following cases:
-//
-// - The usual SLL+LL fallback upon SLL conflict
-// - Pure SLL without LL fallback
-//
-// # Combined SLL+LL Parsing
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative, e.g.
-//
-// {1,2} and {3,4}
-//
-// If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// # Heuristic
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ;
-//
-// When the [ATN] simulation reaches the state before ';', it has a
-// [DFA] state that looks like:
-//
-// [12|1|[], 6|2|[], 12|2|[]]
-//
-// Naturally
-//
-// 12|1|[] and 12|2|[]
-//
-// conflict, but we cannot stop processing this node because alternative 2 has another way to continue,
-// via
-//
-// [6|2|[]]
-//
-// It also lets us continue for this rule:
-//
-// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state immediately before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue, and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// # Pure SLL Parsing
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// # Predicates in SLL+LL Parsing
-//
-// SLL decisions don't evaluate predicates until after they reach [DFA] stop
-// states because they need to create the [DFA] cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation, so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, [ATNConfigSet] combines stack contexts but not
-// semantic predicate contexts, so we might see two configurations like the
-// following:
-//
-// (s, 1, x, {}), (s, 1, x', {p})
-//
-// Before testing these configurations against others, we have to merge
-// x and x' (without modifying the existing configurations).
-// For example, we test (x+x')==x” when looking for conflicts in
-// the following configurations:
-//
-// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})
-//
-// If the configuration set has predicates (as indicated by
-// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
-// the configurations to strip out all the predicates so that a standard
-// [ATNConfigSet] will merge everything ignoring predicates.
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
-
- // Configs in rule stop states indicate reaching the end of the decision
- // rule (local context) or end of start rule (full context). If all
- // configs meet this condition, then none of the configurations is able
- // to Match additional input, so we terminate prediction.
- //
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return true
- }
-
- // pure SLL mode parsing
- if mode == PredictionModeSLL {
-  // Don't bother combining configs from different semantic
-  // contexts if we can fail over to full LL; it costs more time
-  // since we'll often fail over anyway.
- if configs.hasSemanticContext {
- // dup configs, tossing out semantic predicates
- dup := NewATNConfigSet(false)
- for _, c := range configs.configs {
-
- // NewATNConfig({semanticContext:}, c)
- c = NewATNConfig2(c, SemanticContextNone)
- dup.Add(c, nil)
- }
- configs = dup
- }
- // now we have combined contexts for configs with dissimilar predicates
- }
- // pure SLL or combined SLL+LL mode parsing
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
-}
-
-// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
-// [RuleStopState]. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
-func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
- for _, c := range configs.configs {
- if _, ok := c.GetState().(*RuleStopState); ok {
- return true
- }
- }
- return false
-}
-
-// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
-// [RuleStopState]. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// the func returns true if all configurations in configs are in a
-// [RuleStopState]
-func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
-
- for _, c := range configs.configs {
- if _, ok := c.GetState().(*RuleStopState); !ok {
- return false
- }
- }
- return true
-}
-
-// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
-//
-// Can we stop looking ahead during [ATN] simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations 'C', into
-// conflicting subsets (s, _, ctx, _) and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical [ATNConfig].state and [ATNConfig].context values
-// but a different [ATNConfig].alt value, e.g.
-//
-// (s, i, ctx, _)
-//
-// and
-//
-// (s, j, ctx, _) ; for i != j
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// A_s,ctx = {i | (s, i, ctx, _)}
-//
-// for each configuration in C holding s and ctx fixed.
-//
-// Or in pseudo-code:
-//
-// for each configuration c in C:
-// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
-//
-// The values in map are the set of
-//
-// A_s,ctx
-//
-// sets.
-//
-// If
-//
-// |A_s,ctx| = 1
-//
-// then there is no conflict associated with s and ctx.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// further lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// # Conflicting Configs
-//
-// Two configurations:
-//
-// (s, i, x) and (s, j, x')
-//
-// conflict when i != j but x = x'. Because we merge all
-// (s, i, _) configurations together, that means that there are at
-// most n configurations associated with state s for
-// n possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts x and x'.
-//
-// Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either x or x'.
-// If the x associated with lowest alternative i
-// is the superset, then i is the only possible prediction since the
-// others resolve to min(i) as well. However, if x is
-// associated with j > i then at least one stack configuration for
-// j is not in conflict with alternative i. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between x and
-// x', which lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// # Continue/Stop Rule
-//
-// Continue if the union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives,
-//
-// [i for (_, i, _)]
-//
-// tells us which alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// Cases
-//
-// - no conflicts and more than 1 alternative in set => continue
-// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
-// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
-// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set
-// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
-// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
-// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
-// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
-// {1} ∪ {2} = {1,2} => continue
-// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
-// {1} ∪ {2} = {1,2} => continue
-// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
-// {1} ∪ {3} = {1,3} => continue
-//
-// # Exact Ambiguity Detection
-//
-// If all states report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set:
-//
-// |A_i| > 1
-//
-// and
-//
-// A_i = A_j ; for all i, j
-//
-// In other words, we continue examining lookahead until all A_i
-// have more than one alternative and all A_i are the same. If
-//
-// A={{1,2}, {1,3}}
-//
-// then regular LL prediction would terminate because the resolved set is {1}.
-// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
-// two or between one and three, so we keep going. When exact ambiguity detection is
-// required, we can only stop prediction when the sets look like:
-//
-// A={{1,2}}
-//
-// or
-//
-// {{1,2},{1,2}}, etc...
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
- return PredictionModegetSingleViableAlt(altsets)
-}
-
-// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
-// than one alternative.
-//
-// The func returns true if every [BitSet] in altsets has
-// [BitSet].cardinality > 1
-func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
- return !PredictionModehasNonConflictingAltSet(altsets)
-}
-
-// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
-// exactly one alternative.
-//
-// The func returns true if altsets contains at least one [BitSet] with
-// [BitSet].cardinality of 1
-func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() == 1 {
- return true
- }
- }
- return false
-}
-
-// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
-// more than one alternative.
-//
-// The func returns true if altsets contains a [BitSet] with
-// [BitSet].cardinality > 1, otherwise false
-func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() > 1 {
- return true
- }
- }
- return false
-}
-
-// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
-//
-// The func returns true if every member of altsets is equal to the others.
-func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
- var first *BitSet
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if first == nil {
- first = alts
- } else if alts != first {
- return false
- }
- }
-
- return true
-}
-
-// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
-// altsets. If no such alternative exists, this method returns
-// [ATNInvalidAltNumber].
-//
-// @param altsets a collection of alternative subsets
-func PredictionModegetUniqueAlt(altsets []*BitSet) int {
- all := PredictionModeGetAlts(altsets)
- if all.length() == 1 {
- return all.minValue()
- }
-
- return ATNInvalidAltNumber
-}
-
-// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
-// alternative subsets. This method returns the union of each [BitSet]
-// in altsets, being the set of represented alternatives in altsets.
-func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
- all := NewBitSet()
- for _, alts := range altsets {
- all.or(alts)
- }
- return all
-}
-
-// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
-//
-// for each configuration c in configs:
-// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
-func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
- configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
-
- for _, c := range configs.configs {
-
- alts, ok := configToAlts.Get(c)
- if !ok {
- alts = NewBitSet()
- configToAlts.Put(c, alts)
- }
- alts.add(c.GetAlt())
- }
-
- return configToAlts.Values()
-}
-
-// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
-//
-// for each configuration c in configs:
-// map[c.ATNConfig.state] U= c.ATNConfig.alt}
-func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
- m := NewAltDict()
-
- for _, c := range configs.configs {
- alts := m.Get(c.GetState().String())
- if alts == nil {
- alts = NewBitSet()
- m.put(c.GetState().String(), alts)
- }
- alts.(*BitSet).add(c.GetAlt())
- }
- return m
-}
-
-func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
- values := PredictionModeGetStateToAltMap(configs).values()
- for i := 0; i < len(values); i++ {
- if values[i].(*BitSet).length() == 1 {
- return true
- }
- }
- return false
-}
-
-// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
-// if there is one.
-//
-// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
-func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
- result := ATNInvalidAltNumber
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- minAlt := alts.minValue()
- if result == ATNInvalidAltNumber {
- result = minAlt
- } else if result != minAlt { // more than 1 viable alt
- return ATNInvalidAltNumber
- }
- }
- return result
-}
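The documentation above reduces conflict detection to the pseudo-code "for each configuration c in C: map[c] U= c.alt", followed by taking the minimum of each subset and stopping only when all minimums agree. A standalone sketch of that reduction follows; the config struct, helper names, and the string key are hypothetical, and states and contexts are reduced to plain values.

package main

import "fmt"

type config struct {
    state int
    ctx   string
    alt   int
}

// conflictingAltSubsets groups the alternatives seen for each (state, context) pair.
func conflictingAltSubsets(configs []config) []map[int]bool {
    byKey := make(map[string]map[int]bool)
    for _, c := range configs {
        key := fmt.Sprintf("%d|%s", c.state, c.ctx)
        if byKey[key] == nil {
            byKey[key] = make(map[int]bool)
        }
        byKey[key][c.alt] = true
    }
    subsets := make([]map[int]bool, 0, len(byKey))
    for _, alts := range byKey {
        subsets = append(subsets, alts)
    }
    return subsets
}

// singleViableAlt returns the common minimum alternative across all subsets,
// or -1 when the minimums differ and more lookahead is needed.
func singleViableAlt(subsets []map[int]bool) int {
    result := -1
    for _, alts := range subsets {
        minAlt := -1
        for a := range alts {
            if minAlt == -1 || a < minAlt {
                minAlt = a
            }
        }
        if result == -1 {
            result = minAlt
        } else if result != minAlt {
            return -1
        }
    }
    return result
}

func main() {
    cfgs := []config{{5, "x", 1}, {5, "x", 2}, {9, "y", 1}, {9, "y", 2}}
    fmt.Println(singleViableAlt(conflictingAltSubsets(cfgs))) // 1: stop and predict alt 1
}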
diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
deleted file mode 100644
index f2ad04793..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// RuleContext is a record of a single rule invocation. It knows
-// which context invoked it, if any. If there is no parent context, then
-// naturally the invoking state is not valid. The parent link
-// provides a chain upwards from the current rule invocation to the root
-// of the invocation tree, forming a stack.
-//
-// We actually carry no information about the rule associated with this context (except
-// when parsing). We keep only the state number of the invoking state from
-// the [ATN] submachine that invoked this. Contrast this with the s
-// pointer inside [ParserRuleContext] that tracks the current state
-// being "executed" for the current rule.
-//
-// The parent contexts are useful for computing lookahead sets and
-// getting error information.
-//
-// These objects are used during parsing and prediction.
-// For the special case of parsers, we use the struct
-// [ParserRuleContext], which embeds a RuleContext.
-//
-// @see ParserRuleContext
-type RuleContext interface {
- RuleNode
-
- GetInvokingState() int
- SetInvokingState(int)
-
- GetRuleIndex() int
- IsEmpty() bool
-
- GetAltNumber() int
- SetAltNumber(altNumber int)
-
- String([]string, RuleContext) string
-}
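As a small, hypothetical illustration of the parent-chain idea described above, the sketch below records only an invoking state and a parent link per rule invocation; walking the links from the current invocation to the root recovers the stack.

package main

import "fmt"

type invocation struct {
    invokingState int
    parent        *invocation
}

// stack walks the parent chain and returns the invoking states, innermost first.
func (inv *invocation) stack() []int {
    var states []int
    for cur := inv; cur != nil; cur = cur.parent {
        states = append(states, cur.invokingState)
    }
    return states
}

func main() {
    root := &invocation{invokingState: -1} // start rule: no valid invoking state
    mid := &invocation{invokingState: 12, parent: root}
    leaf := &invocation{invokingState: 31, parent: mid}
    fmt.Println(leaf.stack()) // [31 12 -1]
}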
diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
deleted file mode 100644
index 70c0673a0..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/statistics.go
+++ /dev/null
@@ -1,281 +0,0 @@
-//go:build antlr.stats
-
-package antlr
-
-import (
- "fmt"
- "log"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "sync"
-)
-
-// This file allows the user to collect statistics about the ANTLR runtime while a program runs. It is not enabled by default
-// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
-//
-
-// Tells various components to collect statistics. Because it is true only when this file is included in the build,
-// the compiler can completely eliminate all the code that is only used when collecting statistics from builds without the tag.
-const collectStats = true
-
-// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
-// It is exported so that it can be used by others to look for things that are not already looked for in the
-// runtime statistics.
-type goRunStats struct {
-
- // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
- // during a run. It is exported so that it can be used by others to look for things that are not already looked for
- // within this package.
- //
- jStats []*JStatRec
- jStatsLock sync.RWMutex
- topN int
- topNByMax []*JStatRec
- topNByUsed []*JStatRec
- unusedCollections map[CollectionSource]int
- counts map[CollectionSource]int
-}
-
-const (
- collectionsFile = "collections"
-)
-
-var (
- Statistics = &goRunStats{
- topN: 10,
- }
-)
-
-type statsOption func(*goRunStats) error
-
-// Configure allows the statistics system to be configured as the user wants and override the defaults
-func (s *goRunStats) Configure(options ...statsOption) error {
- for _, option := range options {
- err := option(s)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
-//
-// For example, if you want to see the top 20 collections by size, you can do:
-//
-// antlr.Statistics.Configure(antlr.WithTopN(20))
-func WithTopN(topN int) statsOption {
- return func(s *goRunStats) error {
- s.topN = topN
- return nil
- }
-}
-
-// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
-//
-// The function gathers and analyzes a number of statistics about any particular run of
-// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
-// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
-// especially useful in tracking down bugs or performance problems when an ANTLR user could
-// supply the output from this package, but cannot supply the grammar file(s) they are using, even
-// privately to the maintainers.
-//
-// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
-// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
-// extant unless the calling program is built with the antlr.stats tag like so:
-//
-// go build -tags antlr.stats .
-//
-// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
-// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
-// [Statistics.Report] function to report the statistics.
-//
-// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send them to
-// me [Jim Idle] directly at jimi@idle.ws
-//
-// [Jim Idle]: https://github.com/jim-idle
-func (s *goRunStats) Analyze() {
-
- // Look for anything that looks strange and record it in our local maps etc for the report to present it
- //
- s.CollectionAnomalies()
- s.TopNCollections()
-}
-
-// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
-func (s *goRunStats) TopNCollections() {
-
- // Let's sort the stat records by MaxSize
- //
- sort.Slice(s.jStats, func(i, j int) bool {
- return s.jStats[i].MaxSize > s.jStats[j].MaxSize
- })
-
- for i := 0; i < len(s.jStats) && i < s.topN; i++ {
- s.topNByMax = append(s.topNByMax, s.jStats[i])
- }
-
- // Sort by the number of times used
- //
- sort.Slice(s.jStats, func(i, j int) bool {
- return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
- })
- for i := 0; i < len(s.jStats) && i < s.topN; i++ {
- s.topNByUsed = append(s.topNByUsed, s.jStats[i])
- }
-}
-
-// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output
-// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
-// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix.
-func (s *goRunStats) Report(dir string, prefix string) error {
-
- isDir, err := isDirectory(dir)
- switch {
- case err != nil:
- return err
- case !isDir:
- return fmt.Errorf("output directory `%s` is not a directory", dir)
- }
- s.reportCollections(dir, prefix)
-
- // Clean out any old data in case the user forgets
- //
- s.Reset()
- return nil
-}
-
-func (s *goRunStats) Reset() {
- s.jStats = nil
- s.topNByUsed = nil
- s.topNByMax = nil
-}
-
-func (s *goRunStats) reportCollections(dir, prefix string) {
- cname := filepath.Join(dir, ".asciidoctor")
- // If the file doesn't exist, create it, or append to the file
- f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatal(err)
- }
- _, _ = f.WriteString(`// .asciidoctorconfig
-++++
-
-++++`)
- _ = f.Close()
-
- fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
- // If the file doesn't exist, create it, or append to the file
- f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatal(err)
- }
- defer func(f *os.File) {
- err := f.Close()
- if err != nil {
- log.Fatal(err)
- }
- }(f)
- _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
-
- _, _ = f.WriteString("== Summary\n")
-
- if s.unusedCollections != nil {
- _, _ = f.WriteString("=== Unused Collections\n")
- _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n")
- _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n")
- _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n")
- _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
- _, _ = f.WriteString(" actually needed.\n\n")
-
- _, _ = f.WriteString("\n.Unused collections\n")
- _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Type | Count\n")
-
- for k, v := range s.unusedCollections {
- _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
- }
- f.WriteString("|===\n\n")
- }
-
- _, _ = f.WriteString("\n.Summary of Collections\n")
- _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Type | Count\n")
- for k, v := range s.counts {
- _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
- }
- _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
- _, _ = f.WriteString("|===\n\n")
-
- _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
- _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
- for _, c := range s.topNByMax {
- _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
- _, _ = f.WriteString("| " + c.Description + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
- _, _ = f.WriteString("\n")
- }
- _, _ = f.WriteString("|===\n\n")
-
- _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
- _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
- for _, c := range s.topNByUsed {
- _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
- _, _ = f.WriteString("| " + c.Description + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
- _, _ = f.WriteString("\n")
- }
- _, _ = f.WriteString("|===\n\n")
-}
-
-// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled.
-func (s *goRunStats) AddJStatRec(rec *JStatRec) {
- s.jStatsLock.Lock()
- defer s.jStatsLock.Unlock()
- s.jStats = append(s.jStats, rec)
-}
-
-// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
-func (s *goRunStats) CollectionAnomalies() {
- s.jStatsLock.RLock()
- defer s.jStatsLock.RUnlock()
- s.counts = make(map[CollectionSource]int, len(s.jStats))
- for _, c := range s.jStats {
-
-  // Accumulate raw counts
- //
- s.counts[c.Source]++
-
- // Look for allocated but unused collections and count them
- if c.MaxSize == 0 && c.Puts == 0 {
- if s.unusedCollections == nil {
- s.unusedCollections = make(map[CollectionSource]int)
- }
- s.unusedCollections[c.Source]++
- }
- if c.MaxSize > 6000 {
- fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
- }
- }
-
-}
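The statistics machinery above is compiled in only when the antlr.stats build tag is set: this file defines collectStats as true under the tag, a sibling file defines it as false otherwise, and stats-only code guarded by the constant is removed by the compiler in ordinary builds. Below is a minimal sketch of that const-guard pattern; the names are illustrative and the constant is hard-coded rather than selected by a build tag.

package main

import "fmt"

// In the real runtime this constant is chosen by build tags: a file guarded by
// the antlr.stats tag sets it to true, and the default file sets it to false.
const collectStats = false

// record stands in for any statistics-gathering call site. Because collectStats
// is a compile-time constant, the guarded branch is dead code in non-stats
// builds and is eliminated entirely.
func record(event string) {
    if collectStats {
        fmt.Println("recording", event)
    }
}

func main() {
    record("parse") // no-op unless the stats constant is true
}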
diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
deleted file mode 100644
index 4d9eb94e5..000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package antlr
-
-// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
-// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
-// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
-// for ideas on what can be gleaned from these statistics about collections.
-type JStatRec struct {
- Source CollectionSource
- MaxSize int
- CurSize int
- Gets int
- GetHits int
- GetMisses int
- GetHashConflicts int
- GetNoEnt int
- Puts int
- PutHits int
- PutMisses int
- PutHashConflicts int
- MaxSlotSize int
- Description string
- CreateStack []byte
-}
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
index 33da21623..0905f6353 100644
--- a/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -10,12 +10,9 @@ go_library(
"cel.go",
"decls.go",
"env.go",
- "folding.go",
"io.go",
- "inlining.go",
"library.go",
"macro.go",
- "optimizer.go",
"options.go",
"program.go",
"validator.go",
@@ -59,11 +56,7 @@ go_test(
"cel_test.go",
"decls_test.go",
"env_test.go",
- "folding_test.go",
"io_test.go",
- "inlining_test.go",
- "optimizer_test.go",
- "validator_test.go",
],
data = [
"//cel/testdata:gen_test_fds",
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
index b59e3708d..0f9501341 100644
--- a/vendor/github.com/google/cel-go/cel/decls.go
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -353,3 +353,43 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
return nil, fmt.Errorf("unsupported decl: %v", d)
}
}
+
+func typeValueToKind(tv ref.Type) (Kind, error) {
+ switch tv {
+ case types.BoolType:
+ return BoolKind, nil
+ case types.DoubleType:
+ return DoubleKind, nil
+ case types.IntType:
+ return IntKind, nil
+ case types.UintType:
+ return UintKind, nil
+ case types.ListType:
+ return ListKind, nil
+ case types.MapType:
+ return MapKind, nil
+ case types.StringType:
+ return StringKind, nil
+ case types.BytesType:
+ return BytesKind, nil
+ case types.DurationType:
+ return DurationKind, nil
+ case types.TimestampType:
+ return TimestampKind, nil
+ case types.NullType:
+ return NullTypeKind, nil
+ case types.TypeType:
+ return TypeKind, nil
+ default:
+ switch tv.TypeName() {
+ case "dyn":
+ return DynKind, nil
+ case "google.protobuf.Any":
+ return AnyKind, nil
+ case "optional":
+ return OpaqueKind, nil
+ default:
+ return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName())
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
index 6568a8b80..b5c3b4cc5 100644
--- a/vendor/github.com/google/cel-go/cel/env.go
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -38,42 +38,26 @@ type Source = common.Source
// Ast representing the checked or unchecked expression, its source, and related metadata such as
// source position information.
type Ast struct {
- source Source
- impl *celast.AST
-}
-
-// NativeRep converts the AST to a Go-native representation.
-func (ast *Ast) NativeRep() *celast.AST {
- return ast.impl
+ expr *exprpb.Expr
+ info *exprpb.SourceInfo
+ source Source
+ refMap map[int64]*celast.ReferenceInfo
+ typeMap map[int64]*types.Type
}
// Expr returns the proto serializable instance of the parsed/checked expression.
-//
-// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr()
-// the result instead.
func (ast *Ast) Expr() *exprpb.Expr {
- if ast == nil {
- return nil
- }
- pbExpr, _ := celast.ExprToProto(ast.impl.Expr())
- return pbExpr
+ return ast.expr
}
// IsChecked returns whether the Ast value has been successfully type-checked.
func (ast *Ast) IsChecked() bool {
- if ast == nil {
- return false
- }
- return ast.impl.IsChecked()
+ return ast.typeMap != nil && len(ast.typeMap) > 0
}
// SourceInfo returns character offset and newline position information about expression elements.
func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
- if ast == nil {
- return nil
- }
- pbInfo, _ := celast.SourceInfoToProto(ast.impl.SourceInfo())
- return pbInfo
+ return ast.info
}
// ResultType returns the output type of the expression if the Ast has been type-checked, else
@@ -81,6 +65,9 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
//
// Deprecated: use OutputType
func (ast *Ast) ResultType() *exprpb.Type {
+ if !ast.IsChecked() {
+ return chkdecls.Dyn
+ }
out := ast.OutputType()
t, err := TypeToExprType(out)
if err != nil {
@@ -92,18 +79,16 @@ func (ast *Ast) ResultType() *exprpb.Type {
// OutputType returns the output type of the expression if the Ast has been type-checked, else
// returns cel.DynType as the parse step cannot infer types.
func (ast *Ast) OutputType() *Type {
- if ast == nil {
- return types.ErrorType
+ t, found := ast.typeMap[ast.expr.GetId()]
+ if !found {
+ return DynType
}
- return ast.impl.GetType(ast.impl.Expr().ID())
+ return t
}
// Source returns a view of the input used to create the Ast. This source may be complete or
// constructed from the SourceInfo.
func (ast *Ast) Source() Source {
- if ast == nil {
- return nil
- }
return ast.source
}
@@ -213,28 +198,29 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
// It is possible to have both non-nil Ast and Issues values returned from this call: however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
+ // Note, errors aren't currently possible on the Ast to ParsedExpr conversion.
+ pe, _ := AstToParsedExpr(ast)
+
// Construct the internal checker env, erroring if there is an issue adding the declarations.
chk, err := e.initChecker()
if err != nil {
errs := common.NewErrors(ast.Source())
errs.ReportError(common.NoLocation, err.Error())
- return nil, NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo())
+ return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
}
- checked, errs := checker.Check(ast.impl, ast.Source(), chk)
+ res, errs := checker.Check(pe, ast.Source(), chk)
if len(errs.GetErrors()) > 0 {
- return nil, NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo())
+ return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
}
// Manually create the Ast to ensure that the Ast source information (which may be more
// detailed than the information provided by Check), is returned to the caller.
ast = &Ast{
- source: ast.Source(),
- impl: checked}
-
- // Avoid creating a validator config if it's not needed.
- if len(e.validators) == 0 {
- return ast, nil
- }
+ source: ast.Source(),
+ expr: res.Expr,
+ info: res.SourceInfo,
+ refMap: res.ReferenceMap,
+ typeMap: res.TypeMap}
// Generate a validator configuration from the set of configured validators.
vConfig := newValidatorConfig()
@@ -244,9 +230,9 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
}
}
// Apply additional validators on the type-checked result.
- iss := NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo())
+ iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo())
for _, v := range e.validators {
- v.Validate(e, vConfig, checked, iss)
+ v.Validate(e, vConfig, res, iss)
}
if iss.Err() != nil {
return nil, iss
@@ -443,11 +429,16 @@ func (e *Env) Parse(txt string) (*Ast, *Issues) {
// It is possible to have both non-nil Ast and Issues values returned from this call; however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
- parsed, errs := e.prsr.Parse(src)
+ res, errs := e.prsr.Parse(src)
if len(errs.GetErrors()) > 0 {
return nil, &Issues{errs: errs}
}
- return &Ast{source: src, impl: parsed}, nil
+ // Manually create the Ast to ensure that the text source information is propagated on
+ // subsequent calls to Check.
+ return &Ast{
+ source: src,
+ expr: res.GetExpr(),
+ info: res.GetSourceInfo()}, nil
}
// Program generates an evaluable instance of the Ast within the environment (Env).
@@ -543,9 +534,8 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
- pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State())
- newAST := &Ast{source: a.Source(), impl: pruned}
- expr, err := AstToString(newAST)
+ pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State())
+ expr, err := AstToString(ParsedExprToAst(pruned))
if err != nil {
return nil, err
}
@@ -566,10 +556,16 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
// extension functions provided by estimator.
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
+ checked := &celast.CheckedAST{
+ Expr: ast.Expr(),
+ SourceInfo: ast.SourceInfo(),
+ TypeMap: ast.typeMap,
+ ReferenceMap: ast.refMap,
+ }
extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
extendedOpts = append(extendedOpts, opts...)
extendedOpts = append(extendedOpts, e.costOptions...)
- return checker.Cost(ast.impl, estimator, extendedOpts...)
+ return checker.Cost(checked, estimator, extendedOpts...)
}
// configure applies a series of EnvOptions to the current environment.
@@ -711,7 +707,7 @@ type Error = common.Error
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
type Issues struct {
errs *common.Errors
- info *celast.SourceInfo
+ info *exprpb.SourceInfo
}
// NewIssues returns an Issues struct from a common.Errors object.
@@ -722,7 +718,7 @@ func NewIssues(errs *common.Errors) *Issues {
// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
// which can be used with the `ReportErrorAtID` method for additional error reports within the context
// information that's inferred from an expression id.
-func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues {
+func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues {
return &Issues{
errs: errs,
info: info,
@@ -772,7 +768,30 @@ func (i *Issues) String() string {
// The source metadata for the expression at `id`, if present, is attached to the error report.
// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
- i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...)
+ i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...)
+}
+
+// locationByID returns a common.Location given an expression id.
+//
+// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source
+// as this implementation relies on the abstractions present in the protobuf SourceInfo object,
+// and is replicated in the checker.
+func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location {
+ positions := sourceInfo.GetPositions()
+ var line = 1
+ if offset, found := positions[id]; found {
+ col := int(offset)
+ for _, lineOffset := range sourceInfo.GetLineOffsets() {
+ if lineOffset < offset {
+ line++
+ col = int(offset - lineOffset)
+ } else {
+ break
+ }
+ }
+ return common.NewLocation(line, col)
+ }
+ return common.NoLocation
}
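A self-contained sketch of the offset-to-location arithmetic used by locationByID above; the sample line offsets and expression offset are made up for illustration.

package main

import "fmt"

// locationFromOffset mirrors the logic above: start at line 1 with the raw
// character offset as the column, then for each recorded line start that
// precedes the offset, bump the line and re-base the column on that line start.
func locationFromOffset(offset int32, lineOffsets []int32) (line, col int) {
	line = 1
	col = int(offset)
	for _, lineOffset := range lineOffsets {
		if lineOffset < offset {
			line++
			col = int(offset - lineOffset)
		} else {
			break
		}
	}
	return line, col
}

func main() {
	// Assume a 3-line source where lines 2 and 3 begin at character offsets 10 and 25.
	lineOffsets := []int32{10, 25}
	line, col := locationFromOffset(27, lineOffsets)
	fmt.Println(line, col) // 3 2
}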
// getStdEnv lazy initializes the CEL standard environment.
@@ -803,13 +822,6 @@ func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, b
return nil, false
}
-// FindStructFieldNames returns an empty set of fields for the interop provider.
-//
-// To inspect the field names, migrate to a `types.Provider` implementation.
-func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
- return []string{}, false
-}
-
// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
// name, if one exists.
//
diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go
deleted file mode 100644
index d7060896d..000000000
--- a/vendor/github.com/google/cel-go/cel/folding.go
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "fmt"
-
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-// ConstantFoldingOption defines a functional option for configuring constant folding.
-type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error)
-
-// MaxConstantFoldIterations limits the number of times literals may be folded during optimization.
-//
-// Defaults to 100 if not set.
-func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
- return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
- opt.maxFoldIterations = limit
- return opt, nil
- }
-}
-
-// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar and aggregate
-// literal values within function calls and select statements with their evaluated result.
-func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
- folder := &constantFoldingOptimizer{
- maxFoldIterations: defaultMaxConstantFoldIterations,
- }
- var err error
- for _, o := range opts {
- folder, err = o(folder)
- if err != nil {
- return nil, err
- }
- }
- return folder, nil
-}
-
-type constantFoldingOptimizer struct {
- maxFoldIterations int
-}
-
-// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
-// select statements and then evaluates them and replaces the call site with the literal result.
-//
-// Note: only values which can be represented as literals in CEL syntax are supported.
-func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
- root := ast.NavigateAST(a)
-
- // Walk the list of foldable expressions and continue to fold until there are no more folds left.
- // All of the fold candidates returned by the constantExprMatcher should succeed unless there's
- // a logic bug with the selection of expressions.
- foldableExprs := ast.MatchDescendants(root, constantExprMatcher)
- foldCount := 0
- for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
- for _, fold := range foldableExprs {
- // If the expression could be folded because it's a non-strict call, and the
- // branches are pruned, continue to the next fold.
- if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
- continue
- }
- // Otherwise, assume all context is needed to evaluate the expression.
- err := tryFold(ctx, a, fold)
- if err != nil {
- ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
- return a
- }
- }
- foldCount++
- foldableExprs = ast.MatchDescendants(root, constantExprMatcher)
- }
- // Once all of the constants have been folded, try to run through the remaining comprehensions
- // one last time. In this case, there's no guarantee they'll run, so we only update the
- // target comprehension node with the literal value if the evaluation succeeds.
- for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
- tryFold(ctx, a, compre)
- }
-
- // If the output is a list, map, or struct which contains optional entries, then prune it
- // to make sure that the optionals, if resolved, do not surface in the output literal.
- pruneOptionalElements(ctx, root)
-
- // Ensure that all intermediate values in the folded expression can be represented as valid
- // CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendants`
- // to avoid extra allocations during this final pass through the AST.
- ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
- if e.Kind() != ast.LiteralKind {
- return
- }
- val := e.AsLiteral()
- adapted, err := adaptLiteral(ctx, val)
- if err != nil {
- ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error())
- return
- }
- ctx.UpdateExpr(e, adapted)
- }))
-
- return a
-}
-
-// tryFold attempts to evaluate a sub-expression to a literal.
-//
-// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise
-// the method will return an error.
-func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
- // Assume all context is needed to evaluate the expression.
- subAST := &Ast{
- impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()),
- }
- prg, err := ctx.Program(subAST)
- if err != nil {
- return err
- }
- out, _, err := prg.Eval(NoVars())
- if err != nil {
- return err
- }
- // Update the fold expression to be a literal.
- ctx.UpdateExpr(expr, ctx.NewLiteral(out))
- return nil
-}
-
-// maybePruneBranches inspects the non-strict call expression to determine whether
-// a branch can be removed. Evaluation will naturally prune logical and / or calls,
-// but conditional will not be pruned cleanly, so this is one small area where the
-// constant folding step reimplements a portion of the evaluator.
-func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool {
- call := expr.AsCall()
- args := call.Args()
- switch call.FunctionName() {
- case operators.LogicalAnd, operators.LogicalOr:
- return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr)
- case operators.Conditional:
- cond := args[0]
- truthy := args[1]
- falsy := args[2]
- if cond.Kind() != ast.LiteralKind {
- return false
- }
- if cond.AsLiteral() == types.True {
- ctx.UpdateExpr(expr, truthy)
- } else {
- ctx.UpdateExpr(expr, falsy)
- }
- return true
- case operators.In:
- haystack := args[1]
- if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
- ctx.UpdateExpr(expr, ctx.NewLiteral(types.False))
- return true
- }
- needle := args[0]
- if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
- needleValue := needle.AsLiteral()
- list := haystack.AsList()
- for _, e := range list.Elements() {
- if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
- ctx.UpdateExpr(expr, ctx.NewLiteral(types.True))
- return true
- }
- }
- }
- }
- return false
-}
-
-func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool {
- shortcircuit := types.False
- skip := types.True
- if function == operators.LogicalOr {
- shortcircuit = types.True
- skip = types.False
- }
- newArgs := []ast.Expr{}
- for _, arg := range args {
- if arg.Kind() != ast.LiteralKind {
- newArgs = append(newArgs, arg)
- continue
- }
- if arg.AsLiteral() == skip {
- continue
- }
- if arg.AsLiteral() == shortcircuit {
- ctx.UpdateExpr(expr, arg)
- return true
- }
- }
- if len(newArgs) == 0 {
- newArgs = append(newArgs, args[0])
- ctx.UpdateExpr(expr, newArgs[0])
- return true
- }
- if len(newArgs) == 1 {
- ctx.UpdateExpr(expr, newArgs[0])
- return true
- }
- ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...))
- return true
-}
-
-// pruneOptionalElements works from the bottom up to resolve optional elements within
-// aggregate literals.
-//
-// Note, many aggregate literals will be resolved as arguments to functions or select
-// statements, so this method exists to handle the case where the literal could not be
-// fully resolved or exists outside of a call, select, or comprehension context.
-func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) {
- aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher)
- for _, lit := range aggregateLiterals {
- switch lit.Kind() {
- case ast.ListKind:
- pruneOptionalListElements(ctx, lit)
- case ast.MapKind:
- pruneOptionalMapEntries(ctx, lit)
- case ast.StructKind:
- pruneOptionalStructFields(ctx, lit)
- }
- }
-}
-
-func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) {
- l := e.AsList()
- elems := l.Elements()
- optIndices := l.OptionalIndices()
- if len(optIndices) == 0 {
- return
- }
- updatedElems := []ast.Expr{}
- updatedIndices := []int32{}
- newOptIndex := -1
- for _, e := range elems {
- newOptIndex++
- if !l.IsOptional(int32(newOptIndex)) {
- updatedElems = append(updatedElems, e)
- continue
- }
- if e.Kind() != ast.LiteralKind {
- updatedElems = append(updatedElems, e)
- updatedIndices = append(updatedIndices, int32(newOptIndex))
- continue
- }
- optElemVal, ok := e.AsLiteral().(*types.Optional)
- if !ok {
- updatedElems = append(updatedElems, e)
- updatedIndices = append(updatedIndices, int32(newOptIndex))
- continue
- }
- if !optElemVal.HasValue() {
- newOptIndex-- // Skipping causes the list to get smaller.
- continue
- }
- ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue()))
- updatedElems = append(updatedElems, e)
- }
- ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices))
-}
-
-func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) {
- m := e.AsMap()
- entries := m.Entries()
- updatedEntries := []ast.EntryExpr{}
- modified := false
- for _, e := range entries {
- entry := e.AsMapEntry()
- key := entry.Key()
- val := entry.Value()
- // If the entry is not optional, or the value-side of the optional hasn't
- // been resolved to a literal, then preserve the entry as-is.
- if !entry.IsOptional() || val.Kind() != ast.LiteralKind {
- updatedEntries = append(updatedEntries, e)
- continue
- }
- optElemVal, ok := val.AsLiteral().(*types.Optional)
- if !ok {
- updatedEntries = append(updatedEntries, e)
- continue
- }
- // When the key is not a literal, but the value is, then it needs to be
- // restored to an optional value.
- if key.Kind() != ast.LiteralKind {
- undoOptVal, err := adaptLiteral(ctx, optElemVal)
- if err != nil {
- ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err)
- }
- ctx.UpdateExpr(val, undoOptVal)
- updatedEntries = append(updatedEntries, e)
- continue
- }
- modified = true
- if !optElemVal.HasValue() {
- continue
- }
- ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
- updatedEntry := ctx.NewMapEntry(key, val, false)
- updatedEntries = append(updatedEntries, updatedEntry)
- }
- if modified {
- ctx.UpdateExpr(e, ctx.NewMap(updatedEntries))
- }
-}
-
-func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) {
- s := e.AsStruct()
- fields := s.Fields()
- updatedFields := []ast.EntryExpr{}
- modified := false
- for _, f := range fields {
- field := f.AsStructField()
- val := field.Value()
- if !field.IsOptional() || val.Kind() != ast.LiteralKind {
- updatedFields = append(updatedFields, f)
- continue
- }
- optElemVal, ok := val.AsLiteral().(*types.Optional)
- if !ok {
- updatedFields = append(updatedFields, f)
- continue
- }
- modified = true
- if !optElemVal.HasValue() {
- continue
- }
- ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
- updatedField := ctx.NewStructField(field.Name(), val, false)
- updatedFields = append(updatedFields, updatedField)
- }
- if modified {
- ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields))
- }
-}
-
-// adaptLiteral converts a runtime CEL value to its equivalent literal expression.
-//
-// For strongly typed values, the type-provider will be used to reconstruct the fields
-// which are present in the literal and their equivalent initialization values.
-func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) {
- switch t := val.Type().(type) {
- case *types.Type:
- switch t {
- case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
- types.NullType, types.StringType, types.UintType:
- return ctx.NewLiteral(val), nil
- case types.DurationType:
- return ctx.NewCall(
- overloads.TypeConvertDuration,
- ctx.NewLiteral(val.ConvertToType(types.StringType)),
- ), nil
- case types.TimestampType:
- return ctx.NewCall(
- overloads.TypeConvertTimestamp,
- ctx.NewLiteral(val.ConvertToType(types.StringType)),
- ), nil
- case types.OptionalType:
- opt := val.(*types.Optional)
- if !opt.HasValue() {
- return ctx.NewCall("optional.none"), nil
- }
- target, err := adaptLiteral(ctx, opt.GetValue())
- if err != nil {
- return nil, err
- }
- return ctx.NewCall("optional.of", target), nil
- case types.TypeType:
- return ctx.NewIdent(val.(*types.Type).TypeName()), nil
- case types.ListType:
- l, ok := val.(traits.Lister)
- if !ok {
- return nil, fmt.Errorf("failed to adapt %v to literal", val)
- }
- elems := make([]ast.Expr, l.Size().(types.Int))
- idx := 0
- it := l.Iterator()
- for it.HasNext() == types.True {
- elemVal := it.Next()
- elemExpr, err := adaptLiteral(ctx, elemVal)
- if err != nil {
- return nil, err
- }
- elems[idx] = elemExpr
- idx++
- }
- return ctx.NewList(elems, []int32{}), nil
- case types.MapType:
- m, ok := val.(traits.Mapper)
- if !ok {
- return nil, fmt.Errorf("failed to adapt %v to literal", val)
- }
- entries := make([]ast.EntryExpr, m.Size().(types.Int))
- idx := 0
- it := m.Iterator()
- for it.HasNext() == types.True {
- keyVal := it.Next()
- keyExpr, err := adaptLiteral(ctx, keyVal)
- if err != nil {
- return nil, err
- }
- valVal := m.Get(keyVal)
- valExpr, err := adaptLiteral(ctx, valVal)
- if err != nil {
- return nil, err
- }
- entries[idx] = ctx.NewMapEntry(keyExpr, valExpr, false)
- idx++
- }
- return ctx.NewMap(entries), nil
- default:
- provider := ctx.CELTypeProvider()
- fields, found := provider.FindStructFieldNames(t.TypeName())
- if !found {
- return nil, fmt.Errorf("failed to adapt %v to literal", val)
- }
- tester := val.(traits.FieldTester)
- indexer := val.(traits.Indexer)
- fieldInits := []ast.EntryExpr{}
- for _, f := range fields {
- field := types.String(f)
- if tester.IsSet(field) != types.True {
- continue
- }
- fieldVal := indexer.Get(field)
- fieldExpr, err := adaptLiteral(ctx, fieldVal)
- if err != nil {
- return nil, err
- }
- fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false))
- }
- return ctx.NewStruct(t.TypeName(), fieldInits), nil
- }
- }
- return nil, fmt.Errorf("failed to adapt %v to literal", val)
-}
-
-// constantExprMatcher matches calls, select statements, and comprehensions whose arguments
-// are all constant scalar or aggregate literal values.
-//
-// Only comprehensions which are not nested are included as possible constant folds, and only
-// if all variables referenced in the comprehension stack are iteration or accumulation
-// variables.
-func constantExprMatcher(e ast.NavigableExpr) bool {
- switch e.Kind() {
- case ast.CallKind:
- return constantCallMatcher(e)
- case ast.SelectKind:
- sel := e.AsSelect() // guaranteed to be a navigable value
- return constantMatcher(sel.Operand().(ast.NavigableExpr))
- case ast.ComprehensionKind:
- if isNestedComprehension(e) {
- return false
- }
- vars := map[string]bool{}
- constantExprs := true
- visitor := ast.NewExprVisitor(func(e ast.Expr) {
- if e.Kind() == ast.ComprehensionKind {
- nested := e.AsComprehension()
- vars[nested.AccuVar()] = true
- vars[nested.IterVar()] = true
- }
- if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
- constantExprs = false
- }
- })
- ast.PreOrderVisit(e, visitor)
- return constantExprs
- default:
- return false
- }
-}
-
-// constantCallMatcher identifies strict and non-strict calls which can be folded.
-func constantCallMatcher(e ast.NavigableExpr) bool {
- call := e.AsCall()
- children := e.Children()
- fnName := call.FunctionName()
- if fnName == operators.LogicalAnd {
- for _, child := range children {
- if child.Kind() == ast.LiteralKind {
- return true
- }
- }
- }
- if fnName == operators.LogicalOr {
- for _, child := range children {
- if child.Kind() == ast.LiteralKind {
- return true
- }
- }
- }
- if fnName == operators.Conditional {
- cond := children[0]
- if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType {
- return true
- }
- }
- if fnName == operators.In {
- haystack := children[1]
- if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
- return true
- }
- needle := children[0]
- if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
- needleValue := needle.AsLiteral()
- list := haystack.AsList()
- for _, e := range list.Elements() {
- if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
- return true
- }
- }
- }
- }
- // convert all other calls with constant arguments
- for _, child := range children {
- if !constantMatcher(child) {
- return false
- }
- }
- return true
-}
-
-func isNestedComprehension(e ast.NavigableExpr) bool {
- parent, found := e.Parent()
- for found {
- if parent.Kind() == ast.ComprehensionKind {
- return true
- }
- parent, found = parent.Parent()
- }
- return false
-}
-
-func aggregateLiteralMatcher(e ast.NavigableExpr) bool {
- return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind
-}
-
-var (
- constantMatcher = ast.ConstantValueMatcher()
-)
-
-const (
- defaultMaxConstantFoldIterations = 100
-)
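For readers skimming the deleted folding.go, here is a toy, self-contained sketch of the logical-OR short-circuit rule that maybeShortcircuitLogic implemented; it models operands as *bool where nil stands in for a non-literal sub-expression, which is a simplification of the real ast.Expr handling.

package main

import "fmt"

// foldOr sketches the short-circuit rule for logical OR: a literal true
// collapses the whole call, literal false operands are dropped, and any
// remaining non-literal operands are kept.
func foldOr(args []*bool) (result *bool, remaining int) {
	t, f := true, false
	kept := 0
	for _, a := range args {
		if a == nil {
			kept++ // non-literal operand, keep it
			continue
		}
		if *a {
			return &t, 0 // short-circuit: the whole expression is true
		}
		// A literal false contributes nothing to an OR; drop it.
	}
	if kept == 0 {
		return &f, 0 // every operand was a literal false
	}
	return nil, kept // not fully foldable; `kept` operands survive
}

func main() {
	tr, fa := true, false
	r, n := foldOr([]*bool{&fa, nil, &tr})
	fmt.Println(r != nil && *r, n) // true 0
	r, n = foldOr([]*bool{&fa, nil})
	fmt.Println(r == nil, n) // true 1
}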
diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go
deleted file mode 100644
index 8c8335d3b..000000000
--- a/vendor/github.com/google/cel-go/cel/inlining.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/traits"
-)
-
-// InlineVariable holds a variable name to be matched and an AST representing
-// the expression graph which should be used to replace it.
-type InlineVariable struct {
- name string
- alias string
- def *ast.AST
-}
-
-// Name returns the qualified variable or field selection to replace.
-func (v *InlineVariable) Name() string {
- return v.name
-}
-
-// Alias returns the alias to use when performing cel.bind() calls during inlining.
-func (v *InlineVariable) Alias() string {
- return v.alias
-}
-
-// Expr returns the inlined expression value.
-func (v *InlineVariable) Expr() ast.Expr {
- return v.def.Expr()
-}
-
-// Type indicates the inlined expression type.
-func (v *InlineVariable) Type() *Type {
- return v.def.GetType(v.def.Expr().ID())
-}
-
-// NewInlineVariable declares a variable name to be replaced by a checked expression.
-func NewInlineVariable(name string, definition *Ast) *InlineVariable {
- return NewInlineVariableWithAlias(name, name, definition)
-}
-
-// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression.
-// If the variable occurs more than once, the provided alias will be used to replace the expressions
-// where the variable name occurs.
-func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
- return &InlineVariable{name: name, alias: alias, def: definition.impl}
-}
-
-// NewInliningOptimizer creates an optimizer which replaces variables with expression definitions.
-//
-// If a variable occurs one time, the variable is replaced by the inline definition. If the
-// variable occurs more than once, the variable occurrences are replaced by a cel.bind() call.
-func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer {
- return &inliningOptimizer{variables: inlineVars}
-}
-
-type inliningOptimizer struct {
- variables []*InlineVariable
-}
-
-func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
- root := ast.NavigateAST(a)
- for _, inlineVar := range opt.variables {
- matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name()))
- // Skip cases where the variable isn't in the expression graph
- if len(matches) == 0 {
- continue
- }
-
- // For a single match, do a direct replacement of the expression sub-graph.
- if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) {
- for _, match := range matches {
- // Copy the inlined AST expr and source info.
- copyExpr := copyASTAndMetadata(ctx, inlineVar.def)
- opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type())
- }
- continue
- }
-
- // For multiple matches, find the least common ancestor (lca) and insert the
- // variable as a cel.bind() macro.
- var lca ast.NavigableExpr = root
- lcaAncestorCount := 0
- ancestors := map[int64]int{}
- for _, match := range matches {
- // Update the identifier matches with the provided alias.
- parent, found := match, true
- for found {
- ancestorCount, hasAncestor := ancestors[parent.ID()]
- if !hasAncestor {
- ancestors[parent.ID()] = 1
- parent, found = parent.Parent()
- continue
- }
- if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) {
- lca = parent
- lcaAncestorCount = ancestorCount
- }
- ancestors[parent.ID()] = ancestorCount + 1
- parent, found = parent.Parent()
- }
- aliasExpr := ctx.NewIdent(inlineVar.Alias())
- opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type())
- }
-
- // Copy the inlined AST expr and source info.
- copyExpr := copyASTAndMetadata(ctx, inlineVar.def)
- // Update the least common ancestor by inserting a cel.bind() call to the alias.
- inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca)
- opt.inlineExpr(ctx, lca, inlined, inlineVar.Type())
- ctx.sourceInfo.SetMacroCall(lca.ID(), bindMacro)
- }
- return a
-}
-
-// copyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being
-// optimized.
-func copyASTAndMetadata(ctx *OptimizerContext, a *ast.AST) ast.Expr {
- copyExpr, copyInfo := ctx.CopyAST(a)
- // Add in the macro calls from the inlined AST
- for id, call := range copyInfo.MacroCalls() {
- ctx.sourceInfo.SetMacroCall(id, call)
- }
- return copyExpr
-}
-
-// inlineExpr replaces the current expression with the inlined one, unless the location of the inlining
-// happens within a presence test, e.g. has(a.b.c) -> inline alpha for a.b.c in which case an attempt is
-// made to determine whether the inlined value can be presence or existence tested.
-func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) {
- switch prev.Kind() {
- case ast.SelectKind:
- sel := prev.AsSelect()
- if !sel.IsTestOnly() {
- ctx.UpdateExpr(prev, inlined)
- return
- }
- opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType)
- default:
- ctx.UpdateExpr(prev, inlined)
- }
-}
-
-// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to a type-safe
-// expression appropriate for the inlined type, if possible.
-//
-// If the rewrite is not possible an error is reported at the inline expression site.
-func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) {
- // If the input inlined expression is not a select expression it won't work with the has()
- // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error.
- if inlined.Kind() == ast.SelectKind {
- presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined)
- ctx.UpdateExpr(prev, presenceTest)
- ctx.sourceInfo.SetMacroCall(prev.ID(), hasMacro)
- return
- }
-
- ctx.sourceInfo.ClearMacroCall(prev.ID())
- if inlinedType.IsAssignableType(NullType) {
- ctx.UpdateExpr(prev,
- ctx.NewCall(operators.NotEquals,
- inlined,
- ctx.NewLiteral(types.NullValue),
- ))
- return
- }
- if inlinedType.HasTrait(traits.SizerType) {
- ctx.UpdateExpr(prev,
- ctx.NewCall(operators.NotEquals,
- ctx.NewMemberCall(overloads.Size, inlined),
- ctx.NewLiteral(types.IntZero),
- ))
- return
- }
- ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType)
-}
-
-// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression
-// being replaced occurs within a presence test. Value types with a size() method or field selection
-// support can be bound.
-//
-// In future iterations, support may also be added for indexer types which can be rewritten as an `in`
-// expression; however, this would imply a rewrite of the inlined expression that may not be necessary
-// in most cases.
-func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool {
- if inlinedType.IsAssignableType(NullType) ||
- inlinedType.HasTrait(traits.SizerType) ||
- inlinedType.HasTrait(traits.FieldTesterType) {
- return true
- }
- for _, m := range matches {
- if m.Kind() != ast.SelectKind {
- continue
- }
- sel := m.AsSelect()
- if sel.IsTestOnly() {
- return false
- }
- }
- return true
-}
-
-// matchVariable matches simple identifiers, select expressions, and presence test expressions
-// which match the (potentially) qualified variable name provided as input.
-//
-// Note, this function does not support inlining against select expressions which include optional
-// field selection. This may be a future refinement.
-func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher {
- return func(e ast.NavigableExpr) bool {
- if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
- return true
- }
- if e.Kind() == ast.SelectKind {
- sel := e.AsSelect()
- // While the `ToQualifiedName` call could take the select directly, this
- // would skip presence tests from possible matches, which we would like
- // to include.
- qualName, found := containers.ToQualifiedName(sel.Operand())
- return found && qualName+"."+sel.FieldName() == varName
- }
- return false
- }
-}
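The least-common-ancestor search in the deleted inlining optimizer can be illustrated with a small, self-contained sketch over a toy parent-linked tree; the node ids and tree shape below are invented for the example.

package main

import "fmt"

// node is a minimal stand-in for ast.NavigableExpr: an id plus a parent link.
type node struct {
	id     int64
	parent *node
}

// leastCommonAncestor mirrors the ancestor-counting approach above: walk from
// each match toward the root, counting how many matches pass through each
// ancestor. The first node reached by every match is the deepest shared
// ancestor, which is where a cel.bind() would be inserted. (Toy version:
// assumes the matches are distinct.)
func leastCommonAncestor(matches []*node) *node {
	counts := map[int64]int{}
	for _, m := range matches {
		for p := m; p != nil; p = p.parent {
			counts[p.id]++
			if counts[p.id] == len(matches) {
				return p
			}
		}
	}
	return nil
}

func main() {
	root := &node{id: 1}
	left := &node{id: 2, parent: root}
	a := &node{id: 3, parent: left}
	b := &node{id: 4, parent: left}
	fmt.Println(leastCommonAncestor([]*node{a, b}).id) // 2
}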
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
index 3133fb9d7..80f63140e 100644
--- a/vendor/github.com/google/cel-go/cel/io.go
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -47,11 +47,17 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
//
// Prefer CheckedExprToAst if loading expressions from storage.
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
- checked, err := ast.ToAST(checkedExpr)
+ checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr)
if err != nil {
return nil, err
}
- return &Ast{source: src, impl: checked}, nil
+ return &Ast{
+ expr: checkedAST.Expr,
+ info: checkedAST.SourceInfo,
+ source: src,
+ refMap: checkedAST.ReferenceMap,
+ typeMap: checkedAST.TypeMap,
+ }, nil
}
// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
@@ -61,7 +67,13 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
if !a.IsChecked() {
return nil, fmt.Errorf("cannot convert unchecked ast")
}
- return ast.ToProto(a.impl)
+ cAst := &ast.CheckedAST{
+ Expr: a.expr,
+ SourceInfo: a.info,
+ ReferenceMap: a.refMap,
+ TypeMap: a.typeMap,
+ }
+ return ast.CheckedASTToCheckedExpr(cAst)
}
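A hedged sketch of the protobuf round-trip these helpers support, modeled as a store-and-reload flow; the expression text is an illustrative assumption.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile("1 + 2 == 3")
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	// Round-trip through the protobuf CheckedExpr form, as storage would.
	pb, err := cel.AstToCheckedExpr(ast)
	if err != nil {
		panic(err)
	}
	restored := cel.CheckedExprToAst(pb)
	txt, err := cel.AstToString(restored)
	if err != nil {
		panic(err)
	}
	fmt.Println(txt) // prints a semantically equivalent expression, e.g. 1 + 2 == 3
}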
// ParsedExprToAst converts a parsed expression proto message to an Ast.
@@ -77,12 +89,18 @@ func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
//
// Prefer ParsedExprToAst if loading expressions from storage.
func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
- info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo())
+ si := parsedExpr.GetSourceInfo()
+ if si == nil {
+ si = &exprpb.SourceInfo{}
+ }
if src == nil {
- src = common.NewInfoSource(parsedExpr.GetSourceInfo())
+ src = common.NewInfoSource(si)
+ }
+ return &Ast{
+ expr: parsedExpr.GetExpr(),
+ info: si,
+ source: src,
}
- e, _ := ast.ProtoToExpr(parsedExpr.GetExpr())
- return &Ast{source: src, impl: ast.NewAST(e, info)}
}
// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
@@ -98,7 +116,9 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
// Note, the conversion may not be an exact replica of the original expression, but will produce
// a string that is semantically equivalent and whose textual representation is stable.
func AstToString(a *Ast) (string, error) {
- return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo())
+ expr := a.Expr()
+ info := a.SourceInfo()
+ return parser.Unparse(expr, info)
}
// RefValueToValue converts between ref.Val and api.expr.Value.
diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go
index deddc14e5..4d232085c 100644
--- a/vendor/github.com/google/cel-go/cel/library.go
+++ b/vendor/github.com/google/cel-go/cel/library.go
@@ -20,7 +20,6 @@ import (
"strings"
"time"
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/stdlib"
@@ -29,6 +28,8 @@ import (
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
@@ -312,7 +313,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
Types(types.OptionalType),
// Configure the optMap and optFlatMap macros.
- Macros(ReceiverMacro(optMapMacro, 2, optMap)),
+ Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
// Global and member functions for working with optional values.
Function(optionalOfFunc,
@@ -373,7 +374,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
}
if lib.version >= 1 {
- opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+ opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
}
return opts
}
@@ -385,57 +386,57 @@ func (lib *optionalLib) ProgramOptions() []ProgramOption {
}
}
-func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
varIdent := args[0]
varName := ""
- switch varIdent.Kind() {
- case ast.IdentKind:
- varName = varIdent.AsIdent()
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
default:
- return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier")
+ return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier")
}
mapExpr := args[1]
- return meh.NewCall(
+ return meh.GlobalCall(
operators.Conditional,
- meh.NewMemberCall(hasValueFunc, target),
- meh.NewCall(optionalOfFunc,
- meh.NewComprehension(
- meh.NewList(),
+ meh.ReceiverCall(hasValueFunc, target),
+ meh.GlobalCall(optionalOfFunc,
+ meh.Fold(
unusedIterVar,
+ meh.NewList(),
varName,
- meh.NewMemberCall(valueFunc, target),
- meh.NewLiteral(types.False),
- meh.NewIdent(varName),
+ meh.ReceiverCall(valueFunc, target),
+ meh.LiteralBool(false),
+ meh.Ident(varName),
mapExpr,
),
),
- meh.NewCall(optionalNoneFunc),
+ meh.GlobalCall(optionalNoneFunc),
), nil
}
-func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
varIdent := args[0]
varName := ""
- switch varIdent.Kind() {
- case ast.IdentKind:
- varName = varIdent.AsIdent()
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
default:
- return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier")
+ return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier")
}
mapExpr := args[1]
- return meh.NewCall(
+ return meh.GlobalCall(
operators.Conditional,
- meh.NewMemberCall(hasValueFunc, target),
- meh.NewComprehension(
- meh.NewList(),
+ meh.ReceiverCall(hasValueFunc, target),
+ meh.Fold(
unusedIterVar,
+ meh.NewList(),
varName,
- meh.NewMemberCall(valueFunc, target),
- meh.NewLiteral(types.False),
- meh.NewIdent(varName),
+ meh.ReceiverCall(valueFunc, target),
+ meh.LiteralBool(false),
+ meh.Ident(varName),
mapExpr,
),
- meh.NewCall(optionalNoneFunc),
+ meh.GlobalCall(optionalNoneFunc),
), nil
}
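A hedged usage sketch of the optMap macro wired up above through the optional values library; the expression and the expected result are illustrative.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
)

func main() {
	// OptionalTypes enables the optional values library, which registers optMap.
	env, err := cel.NewEnv(cel.OptionalTypes())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`optional.of(5).optMap(x, x * 2)`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	opt := out.(*types.Optional)
	fmt.Println(opt.GetValue()) // 10
}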
diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go
index 4db1fd57a..1eb414c8b 100644
--- a/vendor/github.com/google/cel-go/cel/macro.go
+++ b/vendor/github.com/google/cel-go/cel/macro.go
@@ -15,11 +15,6 @@
package cel
import (
- "fmt"
-
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -31,14 +26,7 @@ import (
// a Macro should be created per arg-count or as a var arg macro.
type Macro = parser.Macro
-// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value.
-type MacroFactory = parser.MacroExpander
-
-// MacroExprFactory assists with the creation of Expr values in a manner which is consistent with
-// the internal semantics and id generation behaviors of the parser and checker libraries.
-type MacroExprFactory = parser.ExprHelper
-
-// MacroExpander converts a call and its associated arguments into a protobuf Expr representation.
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
//
// If the MacroExpander determines within the implementation that an expansion is not needed it may return
// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
@@ -48,197 +36,48 @@ type MacroExprFactory = parser.ExprHelper
// and produces as output an Expr ast node.
//
// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
-type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error)
+type MacroExpander = parser.MacroExpander
// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
-// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
-// consistent with the source position and expression id generation code leveraged by both
-// the parser and type-checker.
-type MacroExprHelper interface {
- // Copy the input expression with a brand new set of identifiers.
- Copy(*exprpb.Expr) *exprpb.Expr
-
- // LiteralBool creates an Expr value for a bool literal.
- LiteralBool(value bool) *exprpb.Expr
-
- // LiteralBytes creates an Expr value for a byte literal.
- LiteralBytes(value []byte) *exprpb.Expr
-
- // LiteralDouble creates an Expr value for double literal.
- LiteralDouble(value float64) *exprpb.Expr
-
- // LiteralInt creates an Expr value for an int literal.
- LiteralInt(value int64) *exprpb.Expr
-
- // LiteralString creates an Expr value for a string literal.
- LiteralString(value string) *exprpb.Expr
-
- // LiteralUint creates an Expr value for a uint literal.
- LiteralUint(value uint64) *exprpb.Expr
-
- // NewList creates a CreateList instruction where the list is comprised of the optional set
- // of elements provided as arguments.
- NewList(elems ...*exprpb.Expr) *exprpb.Expr
-
- // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
- // optional set of key, value entries.
- NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
-
- // NewMapEntry creates a Map Entry for the key, value pair.
- NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
-
- // NewObject creates a CreateStruct instruction for an object with a given type name and
- // optional set of field initializers.
- NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
-
- // NewObjectFieldInit creates a new Object field initializer from the field name and value.
- NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
-
- // Fold creates a fold comprehension instruction.
- //
- // - iterVar is the iteration variable name.
- // - iterRange represents the expression that resolves to a list or map where the elements or
- // keys (respectively) will be iterated over.
- // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
- // - accuInit is the initial expression whose value will be set for the accuVar prior to
- // folding.
- // - condition is the expression to test to determine whether to continue folding.
- // - step is the expression to evaluate at the conclusion of a single fold iteration.
- // - result is the computation to evaluate at the conclusion of the fold.
- //
- // The accuVar should not shadow variable names that you would like to reference within the
- // environment in the step and condition expressions. Presently, the name __result__ is commonly
- // used by built-in macros but this may change in the future.
- Fold(iterVar string,
- iterRange *exprpb.Expr,
- accuVar string,
- accuInit *exprpb.Expr,
- condition *exprpb.Expr,
- step *exprpb.Expr,
- result *exprpb.Expr) *exprpb.Expr
-
- // Ident creates an identifier Expr value.
- Ident(name string) *exprpb.Expr
-
- // AccuIdent returns an accumulator identifier for use with comprehension results.
- AccuIdent() *exprpb.Expr
-
- // GlobalCall creates a function call Expr value for a global (free) function.
- GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
-
- // ReceiverCall creates a function call Expr value for a receiver-style function.
- ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
-
- // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
- PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
-
- // Select creates a field traversal Expr value.
- Select(operand *exprpb.Expr, field string) *exprpb.Expr
-
- // OffsetLocation returns the Location of the expression identifier.
- OffsetLocation(exprID int64) common.Location
-
- // NewError associates an error message with a given expression id.
- NewError(exprID int64, message string) *Error
-}
-
-// GlobalMacro creates a Macro for a global function with the specified arg count.
-func GlobalMacro(function string, argCount int, factory MacroFactory) Macro {
- return parser.NewGlobalMacro(function, argCount, factory)
-}
-
-// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
-func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro {
- return parser.NewReceiverMacro(function, argCount, factory)
-}
-
-// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
-func GlobalVarArgMacro(function string, factory MacroFactory) Macro {
- return parser.NewGlobalVarArgMacro(function, factory)
-}
-
-// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
-func ReceiverVarArgMacro(function string, factory MacroFactory) Macro {
- return parser.NewReceiverVarArgMacro(function, factory)
-}
+type MacroExprHelper = parser.ExprHelper
// NewGlobalMacro creates a Macro for a global function with the specified arg count.
-//
-// Deprecated: use GlobalMacro
func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
- expand := adaptingExpander{expander}
- return parser.NewGlobalMacro(function, argCount, expand.Expander)
+ return parser.NewGlobalMacro(function, argCount, expander)
}
// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
-//
-// Deprecated: use ReceiverMacro
func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
- expand := adaptingExpander{expander}
- return parser.NewReceiverMacro(function, argCount, expand.Expander)
+ return parser.NewReceiverMacro(function, argCount, expander)
}
// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
-//
-// Deprecated: use GlobalVarArgMacro
func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
- expand := adaptingExpander{expander}
- return parser.NewGlobalVarArgMacro(function, expand.Expander)
+ return parser.NewGlobalVarArgMacro(function, expander)
}
// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
-//
-// Deprecated: use ReceiverVarArgMacro
func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
- expand := adaptingExpander{expander}
- return parser.NewReceiverVarArgMacro(function, expand.Expander)
+ return parser.NewReceiverVarArgMacro(function, expander)
}
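A hedged sketch of defining a custom macro with the proto-based NewReceiverMacro signature restored by this patch; the "pairWith" macro name and its expansion are hypothetical, chosen only to show the shape of a MacroExpander.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	// Hypothetical macro: `x.pairWith(y)` expands to the list literal [x, y].
	pairWith := cel.NewReceiverMacro("pairWith", 1,
		func(eh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
			return eh.NewList(target, args[0]), nil
		})
	env, err := cel.NewEnv(cel.Macros(pairWith))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`"a".pairWith("b")`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // a list containing "a" and "b"
}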
// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- ph, err := toParserHelper(meh)
- if err != nil {
- return nil, err
- }
- arg, err := adaptToExpr(args[0])
- if err != nil {
- return nil, err
- }
- if arg.Kind() == ast.SelectKind {
- s := arg.AsSelect()
- return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName()))
- }
- return nil, ph.NewError(arg.ID(), "invalid argument to has() macro")
+ return parser.MakeHas(meh, target, args)
}
// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
// elements in the range match the predicate expressions:
// <iterRange>.exists(<iterVar>, <predicate>)
func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- ph, err := toParserHelper(meh)
- if err != nil {
- return nil, err
- }
- out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
- if err != nil {
- return nil, err
- }
- return adaptToProto(out)
+ return parser.MakeExists(meh, target, args)
}
// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
// one of the elements in the range matches the predicate expression:
// <iterRange>.exists_one(<iterVar>, <predicate>)
func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- ph, err := toParserHelper(meh)
- if err != nil {
- return nil, err
- }
- out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
- if err != nil {
- return nil, err
- }
- return adaptToProto(out)
+ return parser.MakeExistsOne(meh, target, args)
}
// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
@@ -252,30 +91,14 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- ph, err := toParserHelper(meh)
- if err != nil {
- return nil, err
- }
- out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
- if err != nil {
- return nil, err
- }
- return adaptToProto(out)
+ return parser.MakeMap(meh, target, args)
}
// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
// only elements which match the provided predicate expression:
// <iterRange>.filter(<iterVar>, <predicate>)
func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
- ph, err := toParserHelper(meh)
- if err != nil {
- return nil, err
- }
- out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
- if err != nil {
- return nil, err
- }
- return adaptToProto(out)
+ return parser.MakeFilter(meh, target, args)
}
var (
@@ -319,258 +142,3 @@ var (
// NoMacros provides an alias to an empty list of macros
NoMacros = []Macro{}
)
-
-type adaptingExpander struct {
- legacyExpander MacroExpander
-}
-
-func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
- var legacyTarget *exprpb.Expr = nil
- var err *Error = nil
- if target != nil {
- legacyTarget, err = adaptToProto(target)
- if err != nil {
- return nil, err
- }
- }
- legacyArgs := make([]*exprpb.Expr, len(args))
- for i, arg := range args {
- legacyArgs[i], err = adaptToProto(arg)
- if err != nil {
- return nil, err
- }
- }
- ah := &adaptingHelper{modernHelper: eh}
- legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs)
- if err != nil {
- return nil, err
- }
- ex, err := adaptToExpr(legacyExpr)
- if err != nil {
- return nil, err
- }
- return ex, nil
-}
-
-func wrapErr(id int64, message string, err error) *common.Error {
- return &common.Error{
- Location: common.NoLocation,
- Message: fmt.Sprintf("%s: %v", message, err),
- ExprID: id,
- }
-}
-
-type adaptingHelper struct {
- modernHelper parser.ExprHelper
-}
-
-// Copy the input expression with a brand new set of identifiers.
-func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e)))
-}
-
-// LiteralBool creates an Expr value for a bool literal.
-func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value)))
-}
-
-// LiteralBytes creates an Expr value for a byte literal.
-func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value)))
-}
-
-// LiteralDouble creates an Expr value for double literal.
-func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value)))
-}
-
-// LiteralInt creates an Expr value for an int literal.
-func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value)))
-}
-
-// LiteralString creates an Expr value for a string literal.
-func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value)))
-}
-
-// LiteralUint creates an Expr value for a uint literal.
-func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value)))
-}
-
-// NewList creates a CreateList instruction where the list is comprised of the optional set
-// of elements provided as arguments.
-func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...))
-}
-
-// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
-// optional set of key, value entries.
-func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
- adaptedEntries := make([]ast.EntryExpr, len(entries))
- for i, e := range entries {
- adaptedEntries[i] = mustAdaptToEntryExpr(e)
- }
- return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...))
-}
-
-// NewMapEntry creates a Map Entry for the key, value pair.
-func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
- return mustAdaptToProtoEntry(
- ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional))
-}
-
-// NewObject creates a CreateStruct instruction for an object with a given type name and
-// optional set of field initializers.
-func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
- adaptedEntries := make([]ast.EntryExpr, len(fieldInits))
- for i, e := range fieldInits {
- adaptedEntries[i] = mustAdaptToEntryExpr(e)
- }
- return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...))
-}
-
-// NewObjectFieldInit creates a new Object field initializer from the field name and value.
-func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
- return mustAdaptToProtoEntry(
- ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional))
-}
-
-// Fold creates a fold comprehension instruction.
-//
-// - iterVar is the iteration variable name.
-// - iterRange represents the expression that resolves to a list or map where the elements or
-// keys (respectively) will be iterated over.
-// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
-// - accuInit is the initial expression whose value will be set for the accuVar prior to
-// folding.
-// - condition is the expression to test to determine whether to continue folding.
-// - step is the expression to evaluate at the conclusion of a single fold iteration.
-// - result is the computation to evaluate at the conclusion of the fold.
-//
-// The accuVar should not shadow variable names that you would like to reference within the
-// environment in the step and condition expressions. Presently, the name __result__ is commonly
-// used by built-in macros but this may change in the future.
-func (ah *adaptingHelper) Fold(iterVar string,
- iterRange *exprpb.Expr,
- accuVar string,
- accuInit *exprpb.Expr,
- condition *exprpb.Expr,
- step *exprpb.Expr,
- result *exprpb.Expr) *exprpb.Expr {
- return mustAdaptToProto(
- ah.modernHelper.NewComprehension(
- mustAdaptToExpr(iterRange),
- iterVar,
- accuVar,
- mustAdaptToExpr(accuInit),
- mustAdaptToExpr(condition),
- mustAdaptToExpr(step),
- mustAdaptToExpr(result),
- ),
- )
-}
-
-// Ident creates an identifier Expr value.
-func (ah *adaptingHelper) Ident(name string) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewIdent(name))
-}
-
-// AccuIdent returns an accumulator identifier for use with comprehension results.
-func (ah *adaptingHelper) AccuIdent() *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewAccuIdent())
-}
-
-// GlobalCall creates a function call Expr value for a global (free) function.
-func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
- return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...))
-}
-
-// ReceiverCall creates a function call Expr value for a receiver-style function.
-func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
- return mustAdaptToProto(
- ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...))
-}
-
-// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
-func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
- op := mustAdaptToExpr(operand)
- return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field))
-}
-
-// Select creates a field traversal Expr value.
-func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
- op := mustAdaptToExpr(operand)
- return mustAdaptToProto(ah.modernHelper.NewSelect(op, field))
-}
-
-// OffsetLocation returns the Location of the expression identifier.
-func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location {
- return ah.modernHelper.OffsetLocation(exprID)
-}
-
-// NewError associates an error message with a given expression id.
-func (ah *adaptingHelper) NewError(exprID int64, message string) *Error {
- return ah.modernHelper.NewError(exprID, message)
-}
-
-func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr {
- adapted := make([]ast.Expr, len(exprs))
- for i, e := range exprs {
- adapted[i] = mustAdaptToExpr(e)
- }
- return adapted
-}
-
-func mustAdaptToExpr(e *exprpb.Expr) ast.Expr {
- out, _ := adaptToExpr(e)
- return out
-}
-
-func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) {
- if e == nil {
- return nil, nil
- }
- out, err := ast.ProtoToExpr(e)
- if err != nil {
- return nil, wrapErr(e.GetId(), "proto conversion failure", err)
- }
- return out, nil
-}
-
-func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr {
- out, _ := ast.ProtoToEntryExpr(e)
- return out
-}
-
-func mustAdaptToProto(e ast.Expr) *exprpb.Expr {
- out, _ := adaptToProto(e)
- return out
-}
-
-func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) {
- if e == nil {
- return nil, nil
- }
- out, err := ast.ExprToProto(e)
- if err != nil {
- return nil, wrapErr(e.ID(), "expr conversion failure", err)
- }
- return out, nil
-}
-
-func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry {
- out, _ := ast.EntryExprToProto(e)
- return out
-}
-
-func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) {
- ah, ok := meh.(*adaptingHelper)
- if !ok {
- return nil, common.NewError(0,
- fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh),
- common.NoLocation)
- }
- return ah.modernHelper, nil
-}
diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go
deleted file mode 100644
index 99aeeb815..000000000
--- a/vendor/github.com/google/cel-go/cel/optimizer.go
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order.
-//
-// The static optimizer normalizes expression ids and runs type-checking between optimization
-// passes to ensure that the final optimized output is a valid expression with metadata consistent
-// with what would have been generated from a parsed and checked expression.
-//
-// Note: source position information is best-effort and likely wrong, but optimized expressions
-// should be suitable for calls to parser.Unparse.
-type StaticOptimizer struct {
- optimizers []ASTOptimizer
-}
-
-// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied
-// to a checked expression.
-func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
- return &StaticOptimizer{
- optimizers: optimizers,
- }
-}
-
-// Optimize applies a sequence of optimizations to an Ast within a given environment.
-//
-// If issues are encountered, the Issues.Err() return value will be non-nil.
-func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
- // Make a copy of the AST to be optimized.
- optimized := ast.Copy(a.impl)
- ids := newIDGenerator(ast.MaxID(a.impl))
-
- // Create the optimizer context, could be pooled in the future.
- issues := NewIssues(common.NewErrors(a.Source()))
- baseFac := ast.NewExprFactory()
- exprFac := &optimizerExprFactory{
- idGenerator: ids,
- fac: baseFac,
- sourceInfo: optimized.SourceInfo(),
- }
- ctx := &OptimizerContext{
- optimizerExprFactory: exprFac,
- Env: env,
- Issues: issues,
- }
-
- // Apply the optimizations sequentially.
- for _, o := range opt.optimizers {
- optimized = o.Optimize(ctx, optimized)
- if issues.Err() != nil {
- return nil, issues
- }
- // Normalize expression id metadata including coordination with macro call metadata.
- freshIDGen := newIDGenerator(0)
- info := optimized.SourceInfo()
- expr := optimized.Expr()
- normalizeIDs(freshIDGen.renumberStable, expr, info)
- cleanupMacroRefs(expr, info)
-
- // Recheck the updated expression for any possible type-agreement or validation errors.
- parsed := &Ast{
- source: a.Source(),
- impl: ast.NewAST(expr, info)}
- checked, iss := ctx.Check(parsed)
- if iss.Err() != nil {
- return nil, iss
- }
- optimized = checked.impl
- }
-
- // Return the optimized result.
- return &Ast{
- source: a.Source(),
- impl: optimized,
- }, nil
-}
-
-// normalizeIDs ensures that the metadata present with an AST is reset in a manner such
-// that the ids within the expression correspond to the ids within macros.
-func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
- optimized.RenumberIDs(idGen)
-
- if len(info.MacroCalls()) == 0 {
- return
- }
-
- // First, update the macro call ids themselves.
- callIDMap := map[int64]int64{}
- for id := range info.MacroCalls() {
- callIDMap[id] = idGen(id)
- }
- // Then update the macro call definitions which refer to these ids, but
-	// ensure that the updates don't collide with and remove macro entries which haven't
- // been visited / updated yet.
- type macroUpdate struct {
- id int64
- call ast.Expr
- }
- macroUpdates := []macroUpdate{}
- for oldID, newID := range callIDMap {
- call, found := info.GetMacroCall(oldID)
- if !found {
- continue
- }
- call.RenumberIDs(idGen)
- macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call})
- info.ClearMacroCall(oldID)
- }
- for _, u := range macroUpdates {
- info.SetMacroCall(u.id, u.call)
- }
-}
-
-func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) {
- if len(info.MacroCalls()) == 0 {
- return
- }
- // Sanitize the macro call references once the optimized expression has been computed
- // and the ids normalized between the expression and the macros.
- exprRefMap := make(map[int64]struct{})
- ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) {
- if e.ID() == 0 {
- return
- }
- exprRefMap[e.ID()] = struct{}{}
- }))
- // Update the macro call id references to ensure that macro pointers are
- // updated consistently across macros.
- for _, call := range info.MacroCalls() {
- ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) {
- if e.ID() == 0 {
- return
- }
- exprRefMap[e.ID()] = struct{}{}
- }))
- }
- for id := range info.MacroCalls() {
- if _, found := exprRefMap[id]; !found {
- info.ClearMacroCall(id)
- }
- }
-}
-
-// newIDGenerator ensures that new ids are only created the first time they are encountered.
-func newIDGenerator(seed int64) *idGenerator {
- return &idGenerator{
- idMap: make(map[int64]int64),
- seed: seed,
- }
-}
-
-type idGenerator struct {
- idMap map[int64]int64
- seed int64
-}
-
-func (gen *idGenerator) nextID() int64 {
- gen.seed++
- return gen.seed
-}
-
-func (gen *idGenerator) renumberStable(id int64) int64 {
- if id == 0 {
- return 0
- }
- if newID, found := gen.idMap[id]; found {
- return newID
- }
- nextID := gen.nextID()
- gen.idMap[id] = nextID
- return nextID
-}
-
-// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate
-// subexpressions and report any errors encountered along the way. The context also embeds the
-// optimizerExprFactory which can be used to generate new sub-expressions with expression ids
-// consistent with the expectations of a parsed expression.
-type OptimizerContext struct {
- *Env
- *optimizerExprFactory
- *Issues
-}
-
-// ASTOptimizer applies an optimization over an AST and returns the optimized result.
-type ASTOptimizer interface {
- // Optimize optimizes a type-checked AST within an Environment and accumulates any issues.
- Optimize(*OptimizerContext, *ast.AST) *ast.AST
-}
-
-type optimizerExprFactory struct {
- *idGenerator
- fac ast.ExprFactory
- sourceInfo *ast.SourceInfo
-}
-
-// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the
-// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions
-// between copies.
-//
-// Use this method before attempting to merge the expression from AST into another.
-func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) {
- idGen := newIDGenerator(opt.nextID())
- defer func() { opt.seed = idGen.nextID() }()
- copyExpr := opt.fac.CopyExpr(a.Expr())
- copyInfo := ast.CopySourceInfo(a.SourceInfo())
- normalizeIDs(idGen.renumberStable, copyExpr, copyInfo)
- return copyExpr, copyInfo
-}
-
-// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression
-// representing the unexpanded call signature to be inserted into the source info macro call metadata.
-func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) {
- varID := opt.nextID()
- remainingID := opt.nextID()
- remaining = opt.fac.CopyExpr(remaining)
- remaining.RenumberIDs(func(id int64) int64 {
- if id == macroID {
- return remainingID
- }
- return id
- })
- if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists {
- opt.sourceInfo.SetMacroCall(remainingID, opt.fac.CopyExpr(call))
- }
-
- astExpr = opt.fac.NewComprehension(macroID,
- opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}),
- "#unused",
- varName,
- opt.fac.CopyExpr(varInit),
- opt.fac.NewLiteral(opt.nextID(), types.False),
- opt.fac.NewIdent(varID, varName),
- remaining)
-
- macroExpr = opt.fac.NewMemberCall(0, "bind",
- opt.fac.NewIdent(opt.nextID(), "cel"),
- opt.fac.NewIdent(varID, varName),
- opt.fac.CopyExpr(varInit),
- opt.fac.CopyExpr(remaining))
- opt.sanitizeMacro(macroID, macroExpr)
- return
-}
-
-// NewCall creates a global function call invocation expression.
-//
-// Example:
-//
-// countByField(list, fieldName)
-// - function: countByField
-// - args: [list, fieldName]
-func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr {
- return opt.fac.NewCall(opt.nextID(), function, args...)
-}
-
-// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call.
-//
-// Example:
-//
-// list.countByField(fieldName)
-// - function: countByField
-// - target: list
-// - args: [fieldName]
-func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
- return opt.fac.NewMemberCall(opt.nextID(), function, target, args...)
-}
-
-// NewIdent creates a new identifier expression.
-//
-// Examples:
-//
-// - simple_var_name
-// - qualified.subpackage.var_name
-func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr {
- return opt.fac.NewIdent(opt.nextID(), name)
-}
-
-// NewLiteral creates a new literal expression value.
-//
-// The range of valid values for a literal generated during optimization is different than for expressions
-// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can
-// be converted back to a literal-like form.
-func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr {
- return opt.fac.NewLiteral(opt.nextID(), value)
-}
-
-// NewList creates a list expression with a set of optional indices.
-//
-// Examples:
-//
-// [a, b]
-// - elems: [a, b]
-// - optIndices: []
-//
-// [a, ?b, ?c]
-// - elems: [a, b, c]
-// - optIndices: [1, 2]
-func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr {
- return opt.fac.NewList(opt.nextID(), elems, optIndices)
-}
-
-// NewMap creates a map from a set of entry expressions which contain a key and value expression.
-func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr {
- return opt.fac.NewMap(opt.nextID(), entries)
-}
-
-// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the
-// entry is optional.
-//
-// Examples:
-//
-// {a: b}
-// - key: a
-// - value: b
-// - optional: false
-//
-// {?a: ?b}
-// - key: a
-// - value: b
-// - optional: true
-func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr {
- return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional)
-}
-
-// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded
-// has() macro call signature to be inserted into the source info macro call metadata.
-func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) {
- sel := s.AsSelect()
- astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName())
- macroExpr = opt.fac.NewCall(0, "has",
- opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName()))
- opt.sanitizeMacro(macroID, macroExpr)
- return
-}
-
-// NewSelect creates a select expression where a field value is selected from an operand.
-//
-// Example:
-//
-// msg.field_name
-// - operand: msg
-// - field: field_name
-func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr {
- return opt.fac.NewSelect(opt.nextID(), operand, field)
-}
-
-// NewStruct creates a new typed struct value with a set of field initializations.
-//
-// Example:
-//
-// pkg.TypeName{field: value}
-// - typeName: pkg.TypeName
-// - fields: [{field: value}]
-func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr {
- return opt.fac.NewStruct(opt.nextID(), typeName, fields)
-}
-
-// NewStructField creates a struct field initialization.
-//
-// Examples:
-//
-// {count: 3u}
-// - field: count
-// - value: 3u
-// - optional: false
-//
-// {?count: x}
-// - field: count
-// - value: x
-// - optional: true
-func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr {
- return opt.fac.NewStructField(opt.nextID(), field, value, isOptional)
-}
-
-// UpdateExpr updates the target expression with the updated content while preserving macro metadata.
-//
-// There are four scenarios during the update to consider:
-// 1. target is not macro, updated is not macro
-// 2. target is macro, updated is not macro
-// 3. target is macro, updated is macro
-// 4. target is not macro, updated is macro
-//
-// When the target is a macro already, it may either be updated to a new macro function
-// body if the update is also a macro, or it may be removed altogether if the update is
-// not a macro.
-//
-// When the update is a macro, then the target references within other macros must be
-// updated to point to the new updated macro. Otherwise, other macros which pointed to
-// the target body must be replaced with copies of the updated expression body.
-func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) {
- // Update the expression
- target.SetKindCase(updated)
-
-	// Early return if there are no macros present, as the source info reflects the
-	// macro set from the target and updated expressions.
- if len(opt.sourceInfo.MacroCalls()) == 0 {
- return
- }
- // Determine whether the target expression was a macro.
- _, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID())
-
- // Determine whether the updated expression was a macro.
- updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID())
-
- if updatedIsMacro {
- // If the updated call was a macro, then updated id maps to target id,
- // and the updated macro moves into the target id slot.
- opt.sourceInfo.ClearMacroCall(updated.ID())
- opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro)
- } else if targetIsMacro {
- // Otherwise if the target expr was a macro, but is no longer, clear
- // the macro reference.
- opt.sourceInfo.ClearMacroCall(target.ID())
- }
-
-	// Punch holes in the updated value where macro references exist.
- macroExpr := opt.fac.CopyExpr(target)
- macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
- if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists {
- e.SetKindCase(nil)
- }
- })
- ast.PostOrderVisit(macroExpr, macroRefVisitor)
-
- // Update any references to the expression within a macro
- macroVisitor := ast.NewExprVisitor(func(call ast.Expr) {
- // Update the target expression to point to the macro expression which
- // will be empty if the updated expression was a macro.
- if call.ID() == target.ID() {
- call.SetKindCase(opt.fac.CopyExpr(macroExpr))
- }
- // Update the macro call expression if it refers to the updated expression
- // id which has since been remapped to the target id.
- if call.ID() == updated.ID() {
-			// Either ensure the expression is a macro reference or is populated with
-			// the relevant sub-expression if the updated expr was not a macro.
- if updatedIsMacro {
- call.SetKindCase(nil)
- } else {
- call.SetKindCase(opt.fac.CopyExpr(macroExpr))
- }
- // Since SetKindCase does not renumber the id, ensure the references to
- // the old 'updated' id are mapped to the target id.
- call.RenumberIDs(func(id int64) int64 {
- if id == updated.ID() {
- return target.ID()
- }
- return id
- })
- }
- })
- for _, call := range opt.sourceInfo.MacroCalls() {
- ast.PostOrderVisit(call, macroVisitor)
- }
-}
-
-func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) {
- macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
- if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID {
- e.SetKindCase(nil)
- }
- })
- ast.PostOrderVisit(macroExpr, macroRefVisitor)
-}
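Note on the deletion above: the StaticOptimizer pipeline removed here only exists in cel-go v0.18+, so any caller relying on it must be dropped or reworked as part of this rollback. For reference, a minimal sketch of how that now-removed API is typically driven is shown below; it targets the pre-rollback (>= v0.18) module and will not build against the v0.17.7 tree this patch restores. The noopOptimizer type is a hypothetical stand-in for a real ASTOptimizer, not part of the library.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/ast"
)

// noopOptimizer is a hypothetical ASTOptimizer that returns the AST unchanged.
// Real optimizers rewrite sub-expressions via the OptimizerContext factory helpers.
type noopOptimizer struct{}

func (noopOptimizer) Optimize(_ *cel.OptimizerContext, a *ast.AST) *ast.AST {
	return a
}

func main() {
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile("x + 1 == 2")
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Each optimizer runs in order; ids are renumbered and the AST re-checked between passes.
	opt := cel.NewStaticOptimizer(noopOptimizer{})
	optimized, iss := opt.Optimize(env, checked)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	out, err := cel.AstToString(optimized)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // x + 1 == 2
}
```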
diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go
index 3c53e21af..05867730d 100644
--- a/vendor/github.com/google/cel-go/cel/options.go
+++ b/vendor/github.com/google/cel-go/cel/options.go
@@ -448,8 +448,6 @@ const (
OptTrackCost EvalOption = 1 << iota
// OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality.
- //
- // Deprecated: use ext.StringsValidateFormatCalls() as this option is now a no-op.
OptCheckStringFormat EvalOption = 1 << iota
)
diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go
index ece9fbdaf..2dd72f750 100644
--- a/vendor/github.com/google/cel-go/cel/program.go
+++ b/vendor/github.com/google/cel-go/cel/program.go
@@ -19,6 +19,7 @@ import (
"fmt"
"sync"
+ celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
@@ -151,7 +152,7 @@ func (p *prog) clone() *prog {
// ProgramOption values.
//
// If the program cannot be configured the prog will be nil, with a non-nil error response.
-func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) {
+func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
// Build the dispatcher, interpreter, and default program value.
disp := interpreter.NewDispatcher()
@@ -212,6 +213,34 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) {
if len(p.regexOptimizations) > 0 {
decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
}
+ // Enable compile-time checking of syntax/cardinality for string.format calls.
+ if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
+ var isValidType func(id int64, validTypes ...ref.Type) (bool, error)
+ if ast.IsChecked() {
+ isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
+ t := ast.typeMap[id]
+ if t.Kind() == DynKind {
+ return true, nil
+ }
+ for _, vt := range validTypes {
+ k, err := typeValueToKind(vt)
+ if err != nil {
+ return false, err
+ }
+ if t.Kind() == k {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+ } else {
+ // if the AST isn't type-checked, short-circuit validation
+ isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
+ return true, nil
+ }
+ }
+ decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType))
+ }
// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
@@ -245,16 +274,33 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) {
decs = append(decs, interpreter.Observe(observers...))
}
- return p.clone().initInterpretable(a, decs)
+ return p.clone().initInterpretable(ast, decs)
}
return newProgGen(factory)
}
- return p.initInterpretable(a, decorators)
+ return p.initInterpretable(ast, decorators)
}
-func (p *prog) initInterpretable(a *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
- // When the AST has been exprAST it contains metadata that can be used to speed up program execution.
- interpretable, err := p.interpreter.NewInterpretable(a.impl, decs...)
+func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
+ // Unchecked programs do not contain type and reference information and may be slower to execute.
+ if !ast.IsChecked() {
+ interpretable, err :=
+ p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...)
+ if err != nil {
+ return nil, err
+ }
+ p.interpretable = interpretable
+ return p, nil
+ }
+
+ // When the AST has been checked it contains metadata that can be used to speed up program execution.
+ checked := &celast.CheckedAST{
+ Expr: ast.Expr(),
+ SourceInfo: ast.SourceInfo(),
+ TypeMap: ast.typeMap,
+ ReferenceMap: ast.refMap,
+ }
+ interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
if err != nil {
return nil, err
}
@@ -534,6 +580,8 @@ func (p *evalActivationPool) Put(value any) {
}
var (
+ emptyEvalState = interpreter.NewEvalState()
+
// activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
activationPool = newEvalActivationPool()
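Usage note for the program.go hunks above: with the v0.17.7 behavior restored, OptCheckStringFormat is honored again at program-construction time instead of being a no-op, so format checking is opted into via EvalOptions. A minimal sketch, assuming the strings extension (ext.Strings()) is installed so that string.format is declared:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(
		ext.Strings(), // declares string.format
		cel.Variable("name", cel.StringType),
	)
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile(`"hello, %s".format([name])`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// With OptCheckStringFormat set, newProgram validates the format string's
	// verbs against the checked argument types before evaluation.
	prg, err := env.Program(checked, cel.EvalOptions(cel.OptCheckStringFormat))
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // hello, world
}
```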
diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go
index b50c67452..78b311381 100644
--- a/vendor/github.com/google/cel-go/cel/validator.go
+++ b/vendor/github.com/google/cel-go/cel/validator.go
@@ -21,6 +21,8 @@ import (
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/overloads"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
@@ -67,7 +69,7 @@ type ASTValidator interface {
//
// See individual validators for more information on their configuration keys and configuration
// properties.
- Validate(*Env, ValidatorConfig, *ast.AST, *Issues)
+ Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues)
}
// ValidatorConfig provides an accessor method for querying validator configuration state.
@@ -178,7 +180,7 @@ func ValidateComprehensionNestingLimit(limit int) ASTValidator {
return nestingLimitValidator{limit: limit}
}
-type argChecker func(env *Env, call, arg ast.Expr) error
+type argChecker func(env *Env, call, arg ast.NavigableExpr) error
func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
return formatValidator{
@@ -201,8 +203,8 @@ func (v formatValidator) Name() string {
// Validate searches the AST for uses of a given function name with a constant argument and performs a check
// on whether the argument is a valid literal value.
-func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
- root := ast.NavigateAST(a)
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+ root := ast.NavigateCheckedAST(a)
funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
for _, call := range funcCalls {
callArgs := call.AsCall().Args()
@@ -219,8 +221,8 @@ func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Is
}
}
-func evalCall(env *Env, call, arg ast.Expr) error {
- ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))}
+func evalCall(env *Env, call, arg ast.NavigableExpr) error {
+ ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()})
prg, err := env.Program(ast)
if err != nil {
return err
@@ -229,7 +231,7 @@ func evalCall(env *Env, call, arg ast.Expr) error {
return err
}
-func compileRegex(_ *Env, _, arg ast.Expr) error {
+func compileRegex(_ *Env, _, arg ast.NavigableExpr) error {
pattern := arg.AsLiteral().Value().(string)
_, err := regexp.Compile(pattern)
return err
@@ -242,14 +244,25 @@ func (homogeneousAggregateLiteralValidator) Name() string {
return homogeneousValidatorName
}
+// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard
+// and exempt functions from homogeneous aggregate literal checks.
+//
+// TODO: Move this call into the string.format() ASTValidator once ported.
+func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error {
+ emptyList := []string{}
+ exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string)
+ exemptFunctions = append(exemptFunctions, "format")
+ return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions)
+}
+
// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
//
// This validator makes an exception for list and map literals which occur at any level of nesting within
// string format calls.
-func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) {
+func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
var exemptedFunctions []string
exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
- root := ast.NavigateAST(a)
+ root := ast.NavigateCheckedAST(a)
listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
for _, listExpr := range listExprs {
if inExemptFunction(listExpr, exemptedFunctions) {
@@ -260,7 +273,7 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig
optIndices := l.OptionalIndices()
var elemType *Type
for i, e := range elements {
- et := a.GetType(e.ID())
+ et := e.Type()
if isOptionalIndex(i, optIndices) {
et = et.Parameters()[0]
}
@@ -283,10 +296,9 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig
entries := m.Entries()
var keyType, valType *Type
for _, e := range entries {
- mapEntry := e.AsMapEntry()
- key, val := mapEntry.Key(), mapEntry.Value()
- kt, vt := a.GetType(key.ID()), a.GetType(val.ID())
- if mapEntry.IsOptional() {
+ key, val := e.Key(), e.Value()
+ kt, vt := key.Type(), val.Type()
+ if e.IsOptional() {
vt = vt.Parameters()[0]
}
if keyType == nil && valType == nil {
@@ -304,8 +316,7 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig
}
func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
- parent, found := e.Parent()
- for found {
+ if parent, found := e.Parent(); found {
if parent.Kind() == ast.CallKind {
fnName := parent.AsCall().FunctionName()
for _, exempt := range exemptFunctions {
@@ -314,7 +325,9 @@ func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
}
}
}
- parent, found = parent.Parent()
+ if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind {
+ return inExemptFunction(parent, exemptFunctions)
+ }
}
return false
}
@@ -340,8 +353,8 @@ func (v nestingLimitValidator) Name() string {
return "cel.lib.std.validate.comprehension_nesting_limit"
}
-func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
- root := ast.NavigateAST(a)
+func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
+ root := ast.NavigateCheckedAST(a)
comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
if len(comprehensions) <= v.limit {
return
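The validator.go hunks above revert ASTValidator.Validate to the *ast.CheckedAST / NavigateCheckedAST API of v0.17.7. For reference, a custom validator written against that restored signature looks roughly like the sketch below; noListLiteralsValidator is a hypothetical example, not part of the library.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/ast"
)

// noListLiteralsValidator is a hypothetical ASTValidator that reports every
// list literal found in a checked expression.
type noListLiteralsValidator struct{}

func (noListLiteralsValidator) Name() string {
	return "custom.validate.no_list_literals"
}

func (noListLiteralsValidator) Validate(_ *cel.Env, _ cel.ValidatorConfig, a *ast.CheckedAST, iss *cel.Issues) {
	root := ast.NavigateCheckedAST(a)
	for _, e := range ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) {
		iss.ReportErrorAtID(e.ID(), "list literals are not allowed")
	}
}

func main() {
	env, err := cel.NewEnv(cel.ASTValidators(noListLiteralsValidator{}))
	if err != nil {
		panic(err)
	}
	if _, iss := env.Compile(`[1, 2, 3].size() > 0`); iss.Err() != nil {
		fmt.Println(iss.Err()) // the custom validator rejects the list literal
	}
}
```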
diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel
index 997fa91d1..0459d3523 100644
--- a/vendor/github.com/google/cel-go/checker/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel
@@ -60,6 +60,7 @@ go_test(
"//test:go_default_library",
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go
index 57fb3ce5e..720e4fa96 100644
--- a/vendor/github.com/google/cel-go/checker/checker.go
+++ b/vendor/github.com/google/cel-go/checker/checker.go
@@ -18,7 +18,6 @@ package checker
import (
"fmt"
- "reflect"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
@@ -26,98 +25,139 @@ import (
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
type checker struct {
- *ast.AST
- ast.ExprFactory
env *Env
errors *typeErrors
mappings *mapping
freeTypeVarCounter int
+ sourceInfo *exprpb.SourceInfo
+ types map[int64]*types.Type
+ references map[int64]*ast.ReferenceInfo
}
// Check performs type checking, giving a typed AST.
-//
-// The input is a parsed AST and an env which encapsulates type binding of variables,
-// declarations of built-in functions, descriptions of protocol buffers, and a registry for
-// errors.
-//
-// Returns a type-checked AST, which might not be usable if there are errors in the error
-// registry.
-func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) {
+// The input is a ParsedExpr proto and an env which encapsulates
+// type binding of variables, declarations of built-in functions,
+// descriptions of protocol buffers, and a registry for errors.
+// Returns a CheckedExpr proto, which might not be usable if
+// there are errors in the error registry.
+func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) {
errs := common.NewErrors(source)
- typeMap := make(map[int64]*types.Type)
- refMap := make(map[int64]*ast.ReferenceInfo)
c := checker{
- AST: ast.NewCheckedAST(parsed, typeMap, refMap),
- ExprFactory: ast.NewExprFactory(),
env: env,
errors: &typeErrors{errs: errs},
mappings: newMapping(),
freeTypeVarCounter: 0,
+ sourceInfo: parsedExpr.GetSourceInfo(),
+ types: make(map[int64]*types.Type),
+ references: make(map[int64]*ast.ReferenceInfo),
}
- c.check(c.Expr())
+ c.check(parsedExpr.GetExpr())
- // Walk over the final type map substituting any type parameters either by their bound value
- // or by DYN.
- for id, t := range c.TypeMap() {
- c.SetType(id, substitute(c.mappings, t, true))
+ // Walk over the final type map substituting any type parameters either by their bound value or
+ // by DYN.
+ m := make(map[int64]*types.Type)
+ for id, t := range c.types {
+ m[id] = substitute(c.mappings, t, true)
}
- return c.AST, errs
+
+ return &ast.CheckedAST{
+ Expr: parsedExpr.GetExpr(),
+ SourceInfo: parsedExpr.GetSourceInfo(),
+ TypeMap: m,
+ ReferenceMap: c.references,
+ }, errs
}
-func (c *checker) check(e ast.Expr) {
+func (c *checker) check(e *exprpb.Expr) {
if e == nil {
return
}
- switch e.Kind() {
- case ast.LiteralKind:
- literal := ref.Val(e.AsLiteral())
- switch literal.Type() {
- case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
- types.NullType, types.StringType, types.UintType:
- c.setType(e, literal.Type().(*types.Type))
- default:
- c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName())
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ literal := e.GetConstExpr()
+ switch literal.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ c.checkBoolLiteral(e)
+ case *exprpb.Constant_BytesValue:
+ c.checkBytesLiteral(e)
+ case *exprpb.Constant_DoubleValue:
+ c.checkDoubleLiteral(e)
+ case *exprpb.Constant_Int64Value:
+ c.checkInt64Literal(e)
+ case *exprpb.Constant_NullValue:
+ c.checkNullLiteral(e)
+ case *exprpb.Constant_StringValue:
+ c.checkStringLiteral(e)
+ case *exprpb.Constant_Uint64Value:
+ c.checkUint64Literal(e)
}
- case ast.IdentKind:
+ case *exprpb.Expr_IdentExpr:
c.checkIdent(e)
- case ast.SelectKind:
+ case *exprpb.Expr_SelectExpr:
c.checkSelect(e)
- case ast.CallKind:
+ case *exprpb.Expr_CallExpr:
c.checkCall(e)
- case ast.ListKind:
+ case *exprpb.Expr_ListExpr:
c.checkCreateList(e)
- case ast.MapKind:
- c.checkCreateMap(e)
- case ast.StructKind:
+ case *exprpb.Expr_StructExpr:
c.checkCreateStruct(e)
- case ast.ComprehensionKind:
+ case *exprpb.Expr_ComprehensionExpr:
c.checkComprehension(e)
default:
- c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name())
+ c.errors.unexpectedASTType(e.GetId(), c.location(e), e)
}
}
-func (c *checker) checkIdent(e ast.Expr) {
- identName := e.AsIdent()
+func (c *checker) checkInt64Literal(e *exprpb.Expr) {
+ c.setType(e, types.IntType)
+}
+
+func (c *checker) checkUint64Literal(e *exprpb.Expr) {
+ c.setType(e, types.UintType)
+}
+
+func (c *checker) checkStringLiteral(e *exprpb.Expr) {
+ c.setType(e, types.StringType)
+}
+
+func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
+ c.setType(e, types.BytesType)
+}
+
+func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
+ c.setType(e, types.DoubleType)
+}
+
+func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
+ c.setType(e, types.BoolType)
+}
+
+func (c *checker) checkNullLiteral(e *exprpb.Expr) {
+ c.setType(e, types.NullType)
+}
+
+func (c *checker) checkIdent(e *exprpb.Expr) {
+ identExpr := e.GetIdentExpr()
// Check to see if the identifier is declared.
- if ident := c.env.LookupIdent(identName); ident != nil {
+ if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
c.setType(e, ident.Type())
c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
// Overwrite the identifier with its fully qualified name.
- e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
+ identExpr.Name = ident.Name()
return
}
c.setType(e, types.ErrorType)
- c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName)
+ c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName())
}
-func (c *checker) checkSelect(e ast.Expr) {
- sel := e.AsSelect()
+func (c *checker) checkSelect(e *exprpb.Expr) {
+ sel := e.GetSelectExpr()
// Before traversing down the tree, try to interpret as qualified name.
qname, found := containers.ToQualifiedName(e)
if found {
@@ -130,26 +170,31 @@ func (c *checker) checkSelect(e ast.Expr) {
// variable name.
c.setType(e, ident.Type())
c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
- e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
+ identName := ident.Name()
+ e.ExprKind = &exprpb.Expr_IdentExpr{
+ IdentExpr: &exprpb.Expr_Ident{
+ Name: identName,
+ },
+ }
return
}
}
- resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false)
- if sel.IsTestOnly() {
+ resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
+ if sel.TestOnly {
resultType = types.BoolType
}
c.setType(e, substitute(c.mappings, resultType, false))
}
-func (c *checker) checkOptSelect(e ast.Expr) {
+func (c *checker) checkOptSelect(e *exprpb.Expr) {
// Collect metadata related to the opt select call packaged by the parser.
- call := e.AsCall()
- operand := call.Args()[0]
- field := call.Args()[1]
+ call := e.GetCallExpr()
+ operand := call.GetArgs()[0]
+ field := call.GetArgs()[1]
fieldName, isString := maybeUnwrapString(field)
if !isString {
- c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field)
+ c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field)
return
}
@@ -159,7 +204,7 @@ func (c *checker) checkOptSelect(e ast.Expr) {
c.setReference(e, ast.NewFunctionReference("select_optional_field"))
}
-func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type {
+func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type {
// Interpret as field selection, first traversing down the operand.
c.check(operand)
operandType := substitute(c.mappings, c.getType(operand), false)
@@ -177,7 +222,7 @@ func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional b
// Objects yield their field type declaration as the selection result type, but only if
// the field is defined.
messageType := targetType
- if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found {
+ if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found {
resultType = fieldType
}
case types.TypeParamKind:
@@ -191,7 +236,7 @@ func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional b
// Dynamic / error values are treated as DYN type. Errors are handled this way as well
// in order to allow forward progress on the check.
if !isDynOrError(targetType) {
- c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType)
+ c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType)
}
resultType = types.DynType
}
@@ -203,34 +248,35 @@ func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional b
return resultType
}
-func (c *checker) checkCall(e ast.Expr) {
+func (c *checker) checkCall(e *exprpb.Expr) {
// Note: similar logic exists within the `interpreter/planner.go`. If making changes here
// please consider the impact on planner.go and consolidate implementations or mirror code
// as appropriate.
- call := e.AsCall()
- fnName := call.FunctionName()
+ call := e.GetCallExpr()
+ fnName := call.GetFunction()
if fnName == operators.OptSelect {
c.checkOptSelect(e)
return
}
- args := call.Args()
+ args := call.GetArgs()
// Traverse arguments.
for _, arg := range args {
c.check(arg)
}
+ target := call.GetTarget()
// Regular static call with simple name.
- if !call.IsMemberFunction() {
+ if target == nil {
// Check for the existence of the function.
fn := c.env.LookupFunction(fnName)
if fn == nil {
- c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
+ c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
c.setType(e, types.ErrorType)
return
}
// Overwrite the function name with its fully qualified resolved name.
- e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
+ call.Function = fn.Name()
// Check to see whether the overload resolves.
c.resolveOverloadOrError(e, fn, nil, args)
return
@@ -241,7 +287,6 @@ func (c *checker) checkCall(e ast.Expr) {
// target a.b.
//
// Check whether the target is a namespaced function name.
- target := call.Target()
qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
if maybeQualified {
maybeQualifiedName := qualifiedPrefix + "." + fnName
@@ -250,14 +295,15 @@ func (c *checker) checkCall(e ast.Expr) {
// The function name is namespaced and so preserving the target operand would
// be an inaccurate representation of the desired evaluation behavior.
// Overwrite with fully-qualified resolved function name sans receiver target.
- e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
+ call.Target = nil
+ call.Function = fn.Name()
c.resolveOverloadOrError(e, fn, nil, args)
return
}
}
// Regular instance call.
- c.check(target)
+ c.check(call.Target)
fn := c.env.LookupFunction(fnName)
// Function found, attempt overload resolution.
if fn != nil {
@@ -266,11 +312,11 @@ func (c *checker) checkCall(e ast.Expr) {
}
// Function name not declared, record error.
c.setType(e, types.ErrorType)
- c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
+ c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
}
func (c *checker) resolveOverloadOrError(
- e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) {
+ e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) {
// Attempt to resolve the overload.
resolution := c.resolveOverload(e, fn, target, args)
// No such overload, error noted in the resolveOverload call, type recorded here.
@@ -284,7 +330,7 @@ func (c *checker) resolveOverloadOrError(
}
func (c *checker) resolveOverload(
- call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution {
+ call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
var argTypes []*types.Type
if target != nil {
@@ -316,8 +362,8 @@ func (c *checker) resolveOverload(
for i, argType := range argTypes {
if !c.isAssignable(argType, types.BoolType) {
c.errors.typeMismatch(
- args[i].ID(),
- c.locationByID(args[i].ID()),
+ args[i].GetId(),
+ c.locationByID(args[i].GetId()),
types.BoolType,
argType)
resultType = types.ErrorType
@@ -362,29 +408,29 @@ func (c *checker) resolveOverload(
for i, argType := range argTypes {
argTypes[i] = substitute(c.mappings, argType, true)
}
- c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil)
+ c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil)
return nil
}
return newResolution(checkedRef, resultType)
}
-func (c *checker) checkCreateList(e ast.Expr) {
- create := e.AsList()
+func (c *checker) checkCreateList(e *exprpb.Expr) {
+ create := e.GetListExpr()
var elemsType *types.Type
- optionalIndices := create.OptionalIndices()
+ optionalIndices := create.GetOptionalIndices()
optionals := make(map[int32]bool, len(optionalIndices))
for _, optInd := range optionalIndices {
optionals[optInd] = true
}
- for i, e := range create.Elements() {
+ for i, e := range create.GetElements() {
c.check(e)
elemType := c.getType(e)
if optionals[int32(i)] {
var isOptional bool
elemType, isOptional = maybeUnwrapOptional(elemType)
if !isOptional && !isDyn(elemType) {
- c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType)
+ c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType)
}
}
elemsType = c.joinTypes(e, elemsType, elemType)
@@ -396,24 +442,32 @@ func (c *checker) checkCreateList(e ast.Expr) {
c.setType(e, types.NewListType(elemsType))
}
-func (c *checker) checkCreateMap(e ast.Expr) {
- mapVal := e.AsMap()
+func (c *checker) checkCreateStruct(e *exprpb.Expr) {
+ str := e.GetStructExpr()
+ if str.GetMessageName() != "" {
+ c.checkCreateMessage(e)
+ } else {
+ c.checkCreateMap(e)
+ }
+}
+
+func (c *checker) checkCreateMap(e *exprpb.Expr) {
+ mapVal := e.GetStructExpr()
var mapKeyType *types.Type
var mapValueType *types.Type
- for _, e := range mapVal.Entries() {
- entry := e.AsMapEntry()
- key := entry.Key()
+ for _, ent := range mapVal.GetEntries() {
+ key := ent.GetMapKey()
c.check(key)
mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
- val := entry.Value()
+ val := ent.GetValue()
c.check(val)
valType := c.getType(val)
- if entry.IsOptional() {
+ if ent.GetOptionalEntry() {
var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) {
- c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType)
+ c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType)
}
}
mapValueType = c.joinTypes(val, mapValueType, valType)
@@ -426,28 +480,25 @@ func (c *checker) checkCreateMap(e ast.Expr) {
c.setType(e, types.NewMapType(mapKeyType, mapValueType))
}
-func (c *checker) checkCreateStruct(e ast.Expr) {
- msgVal := e.AsStruct()
+func (c *checker) checkCreateMessage(e *exprpb.Expr) {
+ msgVal := e.GetStructExpr()
// Determine the type of the message.
resultType := types.ErrorType
- ident := c.env.LookupIdent(msgVal.TypeName())
+ ident := c.env.LookupIdent(msgVal.GetMessageName())
if ident == nil {
c.errors.undeclaredReference(
- e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName())
+ e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName())
c.setType(e, types.ErrorType)
return
}
// Ensure the type name is fully qualified in the AST.
typeName := ident.Name()
- if msgVal.TypeName() != typeName {
- e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields()))
- msgVal = e.AsStruct()
- }
- c.setReference(e, ast.NewIdentReference(typeName, nil))
+ msgVal.MessageName = typeName
+ c.setReference(e, ast.NewIdentReference(ident.Name(), nil))
identKind := ident.Type().Kind()
if identKind != types.ErrorKind {
if identKind != types.TypeKind {
- c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName())
+ c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName())
} else {
resultType = ident.Type().Parameters()[0]
// Backwards compatibility test between well-known types and message types
@@ -458,7 +509,7 @@ func (c *checker) checkCreateStruct(e ast.Expr) {
} else if resultType.Kind() == types.StructKind {
typeName = resultType.DeclaredTypeName()
} else {
- c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName())
+ c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName())
resultType = types.ErrorType
}
}
@@ -466,38 +517,37 @@ func (c *checker) checkCreateStruct(e ast.Expr) {
c.setType(e, resultType)
// Check the field initializers.
- for _, f := range msgVal.Fields() {
- field := f.AsStructField()
- fieldName := field.Name()
- value := field.Value()
+ for _, ent := range msgVal.GetEntries() {
+ field := ent.GetFieldKey()
+ value := ent.GetValue()
c.check(value)
fieldType := types.ErrorType
- ft, found := c.lookupFieldType(f.ID(), typeName, fieldName)
+ ft, found := c.lookupFieldType(ent.GetId(), typeName, field)
if found {
fieldType = ft
}
valType := c.getType(value)
- if field.IsOptional() {
+ if ent.GetOptionalEntry() {
var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) {
- c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType)
+ c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType)
}
}
if !c.isAssignable(fieldType, valType) {
- c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType)
+ c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType)
}
}
}
-func (c *checker) checkComprehension(e ast.Expr) {
- comp := e.AsComprehension()
- c.check(comp.IterRange())
- c.check(comp.AccuInit())
- accuType := c.getType(comp.AccuInit())
- rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false)
+func (c *checker) checkComprehension(e *exprpb.Expr) {
+ comp := e.GetComprehensionExpr()
+ c.check(comp.GetIterRange())
+ c.check(comp.GetAccuInit())
+ accuType := c.getType(comp.GetAccuInit())
+ rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false)
var varType *types.Type
switch rangeType.Kind() {
@@ -514,32 +564,32 @@ func (c *checker) checkComprehension(e ast.Expr) {
// Set the range iteration variable to type DYN as well.
varType = types.DynType
default:
- c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType)
+ c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType)
varType = types.ErrorType
}
// Create a scope for the comprehension since it has a local accumulation variable.
// This scope will contain the accumulation variable used to compute the result.
c.env = c.env.enterScope()
- c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType))
+ c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType))
// Create a block scope for the loop.
c.env = c.env.enterScope()
- c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType))
+ c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType))
// Check the variable references in the condition and step.
- c.check(comp.LoopCondition())
- c.assertType(comp.LoopCondition(), types.BoolType)
- c.check(comp.LoopStep())
- c.assertType(comp.LoopStep(), accuType)
+ c.check(comp.GetLoopCondition())
+ c.assertType(comp.GetLoopCondition(), types.BoolType)
+ c.check(comp.GetLoopStep())
+ c.assertType(comp.GetLoopStep(), accuType)
// Exit the loop's block scope before checking the result.
c.env = c.env.exitScope()
- c.check(comp.Result())
+ c.check(comp.GetResult())
// Exit the comprehension scope.
c.env = c.env.exitScope()
- c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false))
+ c.setType(e, substitute(c.mappings, c.getType(comp.GetResult()), false))
}
// Checks compatibility of joined types, and returns the most general common type.
-func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type {
+func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type {
if previous == nil {
return current
}
@@ -549,7 +599,7 @@ func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Ty
if c.dynAggregateLiteralElementTypesEnabled() {
return types.DynType
}
- c.errors.typeMismatch(e.ID(), c.location(e), previous, current)
+ c.errors.typeMismatch(e.GetId(), c.location(e), previous, current)
return types.ErrorType
}
@@ -583,41 +633,41 @@ func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
return false
}
-func maybeUnwrapString(e ast.Expr) (string, bool) {
- switch e.Kind() {
- case ast.LiteralKind:
- literal := e.AsLiteral()
- switch v := literal.(type) {
- case types.String:
- return string(v), true
+func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ literal := e.GetConstExpr()
+ switch literal.GetConstantKind().(type) {
+ case *exprpb.Constant_StringValue:
+ return literal.GetStringValue(), true
}
}
return "", false
}
-func (c *checker) setType(e ast.Expr, t *types.Type) {
- if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) {
- c.errors.incompatibleType(e.ID(), c.location(e), e, old, t)
+func (c *checker) setType(e *exprpb.Expr, t *types.Type) {
+ if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) {
+ c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t)
return
}
- c.SetType(e.ID(), t)
+ c.types[e.GetId()] = t
}
-func (c *checker) getType(e ast.Expr) *types.Type {
- return c.TypeMap()[e.ID()]
+func (c *checker) getType(e *exprpb.Expr) *types.Type {
+ return c.types[e.GetId()]
}
-func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) {
- if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) {
- c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r)
+func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) {
+ if old, found := c.references[e.GetId()]; found && !old.Equals(r) {
+ c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r)
return
}
- c.SetReference(e.ID(), r)
+ c.references[e.GetId()] = r
}
-func (c *checker) assertType(e ast.Expr, t *types.Type) {
+func (c *checker) assertType(e *exprpb.Expr, t *types.Type) {
if !c.isAssignable(t, c.getType(e)) {
- c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e))
+ c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e))
}
}
@@ -633,12 +683,26 @@ func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
}
}
-func (c *checker) location(e ast.Expr) common.Location {
- return c.locationByID(e.ID())
+func (c *checker) location(e *exprpb.Expr) common.Location {
+ return c.locationByID(e.GetId())
}
func (c *checker) locationByID(id int64) common.Location {
- return c.SourceInfo().GetStartLocation(id)
+ positions := c.sourceInfo.GetPositions()
+ var line = 1
+ if offset, found := positions[id]; found {
+ col := int(offset)
+ for _, lineOffset := range c.sourceInfo.GetLineOffsets() {
+ if lineOffset < offset {
+ line++
+ col = int(offset - lineOffset)
+ } else {
+ break
+ }
+ }
+ return common.NewLocation(line, col)
+ }
+ return common.NoLocation
}
func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
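The restored locationByID above derives a (line, column) location from an absolute character offset by walking the source's line-offset table. A standalone sketch of that arithmetic, using made-up offsets, is shown below for reference:

```go
package main

import "fmt"

// offsetToLocation mirrors the arithmetic in the restored checker.locationByID:
// each entry in lineOffsets is the absolute offset at which a new source line
// starts, and the column is measured from the most recent line start.
func offsetToLocation(offset int32, lineOffsets []int32) (line, col int) {
	line = 1
	col = int(offset)
	for _, lineOffset := range lineOffsets {
		if lineOffset < offset {
			line++
			col = int(offset - lineOffset)
		} else {
			break
		}
	}
	return line, col
}

func main() {
	// Hypothetical source where line 2 starts at offset 10 and line 3 at offset 18.
	lineOffsets := []int32{10, 18}
	fmt.Println(offsetToLocation(4, lineOffsets))  // 1 4
	fmt.Println(offsetToLocation(14, lineOffsets)) // 2 4
}
```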
diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go
index 3470d0a3f..f232f30da 100644
--- a/vendor/github.com/google/cel-go/checker/cost.go
+++ b/vendor/github.com/google/cel-go/checker/cost.go
@@ -22,6 +22,8 @@ import (
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go
@@ -56,7 +58,7 @@ type AstNode interface {
// Type returns the deduced type of the AstNode.
Type() *types.Type
// Expr returns the expression of the AstNode.
- Expr() ast.Expr
+ Expr() *exprpb.Expr
// ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
// For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings
// and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
@@ -67,7 +69,7 @@ type AstNode interface {
type astNode struct {
path []string
t *types.Type
- expr ast.Expr
+ expr *exprpb.Expr
derivedSize *SizeEstimate
}
@@ -79,7 +81,7 @@ func (e astNode) Type() *types.Type {
return e.t
}
-func (e astNode) Expr() ast.Expr {
+func (e astNode) Expr() *exprpb.Expr {
return e.expr
}
@@ -88,27 +90,29 @@ func (e astNode) ComputedSize() *SizeEstimate {
return e.derivedSize
}
var v uint64
- switch e.expr.Kind() {
- case ast.LiteralKind:
- switch ck := e.expr.AsLiteral().(type) {
- case types.String:
+ switch ek := e.expr.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ switch ck := ek.ConstExpr.GetConstantKind().(type) {
+ case *exprpb.Constant_StringValue:
// converting to runes here is an O(n) operation, but
// this is consistent with how size is computed at runtime,
// and how the language definition defines string size
- v = uint64(len([]rune(ck)))
- case types.Bytes:
- v = uint64(len(ck))
- case types.Bool, types.Double, types.Duration,
- types.Int, types.Timestamp, types.Uint,
- types.Null:
+ v = uint64(len([]rune(ck.StringValue)))
+ case *exprpb.Constant_BytesValue:
+ v = uint64(len(ck.BytesValue))
+ case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
+ *exprpb.Constant_Int64Value, *exprpb.Constant_TimestampValue, *exprpb.Constant_Uint64Value,
+ *exprpb.Constant_NullValue:
v = uint64(1)
default:
return nil
}
- case ast.ListKind:
- v = uint64(e.expr.AsList().Size())
- case ast.MapKind:
- v = uint64(e.expr.AsMap().Size())
+ case *exprpb.Expr_ListExpr:
+ v = uint64(len(ek.ListExpr.GetElements()))
+ case *exprpb.Expr_StructExpr:
+ if ek.StructExpr.GetMessageName() == "" {
+ v = uint64(len(ek.StructExpr.GetEntries()))
+ }
default:
return nil
}
@@ -261,7 +265,7 @@ type coster struct {
iterRanges iterRangeScopes
// computedSizes tracks the computed sizes of call results.
computedSizes map[int64]SizeEstimate
- checkedAST *ast.AST
+ checkedAST *ast.CheckedAST
estimator CostEstimator
overloadEstimators map[string]FunctionEstimator
// presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
@@ -271,8 +275,8 @@ type coster struct {
// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
type iterRangeScopes map[string][]int64
-func (vs iterRangeScopes) push(varName string, expr ast.Expr) {
- vs[varName] = append(vs[varName], expr.ID())
+func (vs iterRangeScopes) push(varName string, expr *exprpb.Expr) {
+ vs[varName] = append(vs[varName], expr.GetId())
}
func (vs iterRangeScopes) pop(varName string) {
@@ -320,9 +324,9 @@ func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) C
}
// Cost estimates the cost of the parsed and type checked CEL expression.
-func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
c := &coster{
- checkedAST: checked,
+ checkedAST: checker,
estimator: estimator,
overloadEstimators: map[string]FunctionEstimator{},
exprPath: map[int64][]string{},
@@ -336,30 +340,28 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs
return CostEstimate{}, err
}
}
- return c.cost(checked.Expr()), nil
+ return c.cost(checker.Expr), nil
}
-func (c *coster) cost(e ast.Expr) CostEstimate {
+func (c *coster) cost(e *exprpb.Expr) CostEstimate {
if e == nil {
return CostEstimate{}
}
var cost CostEstimate
- switch e.Kind() {
- case ast.LiteralKind:
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
cost = constCost
- case ast.IdentKind:
+ case *exprpb.Expr_IdentExpr:
cost = c.costIdent(e)
- case ast.SelectKind:
+ case *exprpb.Expr_SelectExpr:
cost = c.costSelect(e)
- case ast.CallKind:
+ case *exprpb.Expr_CallExpr:
cost = c.costCall(e)
- case ast.ListKind:
+ case *exprpb.Expr_ListExpr:
cost = c.costCreateList(e)
- case ast.MapKind:
- cost = c.costCreateMap(e)
- case ast.StructKind:
+ case *exprpb.Expr_StructExpr:
cost = c.costCreateStruct(e)
- case ast.ComprehensionKind:
+ case *exprpb.Expr_ComprehensionExpr:
cost = c.costComprehension(e)
default:
return CostEstimate{}
@@ -367,51 +369,53 @@ func (c *coster) cost(e ast.Expr) CostEstimate {
return cost
}
-func (c *coster) costIdent(e ast.Expr) CostEstimate {
- identName := e.AsIdent()
+func (c *coster) costIdent(e *exprpb.Expr) CostEstimate {
+ identExpr := e.GetIdentExpr()
+
// build and track the field path
- if iterRange, ok := c.iterRanges.peek(identName); ok {
- switch c.checkedAST.GetType(iterRange).Kind() {
+ if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok {
+ switch c.checkedAST.TypeMap[iterRange].Kind() {
case types.ListKind:
c.addPath(e, append(c.exprPath[iterRange], "@items"))
case types.MapKind:
c.addPath(e, append(c.exprPath[iterRange], "@keys"))
}
} else {
- c.addPath(e, []string{identName})
+ c.addPath(e, []string{identExpr.GetName()})
}
return selectAndIdentCost
}
-func (c *coster) costSelect(e ast.Expr) CostEstimate {
- sel := e.AsSelect()
+func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
+ sel := e.GetSelectExpr()
var sum CostEstimate
- if sel.IsTestOnly() {
+ if sel.GetTestOnly() {
// recurse, but do not add any cost
// this is equivalent to how evalTestOnly increments the runtime cost counter
// but does not add any additional cost for the qualifier, except here we do
// the reverse (ident adds cost)
sum = sum.Add(c.presenceTestCost)
- sum = sum.Add(c.cost(sel.Operand()))
+ sum = sum.Add(c.cost(sel.GetOperand()))
return sum
}
- sum = sum.Add(c.cost(sel.Operand()))
- targetType := c.getType(sel.Operand())
+ sum = sum.Add(c.cost(sel.GetOperand()))
+ targetType := c.getType(sel.GetOperand())
switch targetType.Kind() {
case types.MapKind, types.StructKind, types.TypeParamKind:
sum = sum.Add(selectAndIdentCost)
}
// build and track the field path
- c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName()))
+ c.addPath(e, append(c.getPath(sel.GetOperand()), sel.GetField()))
return sum
}
-func (c *coster) costCall(e ast.Expr) CostEstimate {
- call := e.AsCall()
- args := call.Args()
+func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
+ call := e.GetCallExpr()
+ target := call.GetTarget()
+ args := call.GetArgs()
var sum CostEstimate
@@ -422,20 +426,22 @@ func (c *coster) costCall(e ast.Expr) CostEstimate {
argTypes[i] = c.newAstNode(arg)
}
- overloadIDs := c.checkedAST.GetOverloadIDs(e.ID())
- if len(overloadIDs) == 0 {
+ ref := c.checkedAST.ReferenceMap[e.GetId()]
+ if ref == nil || len(ref.OverloadIDs) == 0 {
return CostEstimate{}
}
var targetType AstNode
- if call.IsMemberFunction() {
- sum = sum.Add(c.cost(call.Target()))
- targetType = c.newAstNode(call.Target())
+ if target != nil {
+ if call.Target != nil {
+ sum = sum.Add(c.cost(call.GetTarget()))
+ targetType = c.newAstNode(call.GetTarget())
+ }
}
// Pick a cost estimate range that covers all the overload cost estimation ranges
fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
var resultSize *SizeEstimate
- for _, overload := range overloadIDs {
- overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts)
+ for _, overload := range ref.OverloadIDs {
+ overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts)
fnCost = fnCost.Union(overloadCost.CostEstimate)
if overloadCost.ResultSize != nil {
if resultSize == nil {
@@ -458,54 +464,62 @@ func (c *coster) costCall(e ast.Expr) CostEstimate {
}
}
if resultSize != nil {
- c.computedSizes[e.ID()] = *resultSize
+ c.computedSizes[e.GetId()] = *resultSize
}
return sum.Add(fnCost)
}
-func (c *coster) costCreateList(e ast.Expr) CostEstimate {
- create := e.AsList()
+func (c *coster) costCreateList(e *exprpb.Expr) CostEstimate {
+ create := e.GetListExpr()
var sum CostEstimate
- for _, e := range create.Elements() {
+ for _, e := range create.GetElements() {
sum = sum.Add(c.cost(e))
}
return sum.Add(createListBaseCost)
}
-func (c *coster) costCreateMap(e ast.Expr) CostEstimate {
- mapVal := e.AsMap()
+func (c *coster) costCreateStruct(e *exprpb.Expr) CostEstimate {
+ str := e.GetStructExpr()
+ if str.MessageName != "" {
+ return c.costCreateMessage(e)
+ }
+ return c.costCreateMap(e)
+}
+
+func (c *coster) costCreateMap(e *exprpb.Expr) CostEstimate {
+ mapVal := e.GetStructExpr()
var sum CostEstimate
- for _, ent := range mapVal.Entries() {
- entry := ent.AsMapEntry()
- sum = sum.Add(c.cost(entry.Key()))
- sum = sum.Add(c.cost(entry.Value()))
+ for _, ent := range mapVal.GetEntries() {
+ key := ent.GetMapKey()
+ sum = sum.Add(c.cost(key))
+
+ sum = sum.Add(c.cost(ent.GetValue()))
}
return sum.Add(createMapBaseCost)
}
-func (c *coster) costCreateStruct(e ast.Expr) CostEstimate {
- msgVal := e.AsStruct()
+func (c *coster) costCreateMessage(e *exprpb.Expr) CostEstimate {
+ msgVal := e.GetStructExpr()
var sum CostEstimate
- for _, ent := range msgVal.Fields() {
- field := ent.AsStructField()
- sum = sum.Add(c.cost(field.Value()))
+ for _, ent := range msgVal.GetEntries() {
+ sum = sum.Add(c.cost(ent.GetValue()))
}
return sum.Add(createMessageBaseCost)
}
-func (c *coster) costComprehension(e ast.Expr) CostEstimate {
- comp := e.AsComprehension()
+func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate {
+ comp := e.GetComprehensionExpr()
var sum CostEstimate
- sum = sum.Add(c.cost(comp.IterRange()))
- sum = sum.Add(c.cost(comp.AccuInit()))
+ sum = sum.Add(c.cost(comp.GetIterRange()))
+ sum = sum.Add(c.cost(comp.GetAccuInit()))
// Track the iterRange of each IterVar for field path construction
- c.iterRanges.push(comp.IterVar(), comp.IterRange())
- loopCost := c.cost(comp.LoopCondition())
- stepCost := c.cost(comp.LoopStep())
- c.iterRanges.pop(comp.IterVar())
- sum = sum.Add(c.cost(comp.Result()))
- rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange()))
+ c.iterRanges.push(comp.GetIterVar(), comp.GetIterRange())
+ loopCost := c.cost(comp.GetLoopCondition())
+ stepCost := c.cost(comp.GetLoopStep())
+ c.iterRanges.pop(comp.GetIterVar())
+ sum = sum.Add(c.cost(comp.Result))
+ rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange()))
rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
sum = sum.Add(rangeCost)
@@ -657,26 +671,26 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
}
-func (c *coster) getType(e ast.Expr) *types.Type {
- return c.checkedAST.GetType(e.ID())
+func (c *coster) getType(e *exprpb.Expr) *types.Type {
+ return c.checkedAST.TypeMap[e.GetId()]
}
-func (c *coster) getPath(e ast.Expr) []string {
- return c.exprPath[e.ID()]
+func (c *coster) getPath(e *exprpb.Expr) []string {
+ return c.exprPath[e.GetId()]
}
-func (c *coster) addPath(e ast.Expr, path []string) {
- c.exprPath[e.ID()] = path
+func (c *coster) addPath(e *exprpb.Expr, path []string) {
+ c.exprPath[e.GetId()] = path
}
-func (c *coster) newAstNode(e ast.Expr) *astNode {
+func (c *coster) newAstNode(e *exprpb.Expr) *astNode {
path := c.getPath(e)
if len(path) > 0 && path[0] == parser.AccumulatorName {
// only provide paths to root vars; omit accumulator vars
path = nil
}
var derivedSize *SizeEstimate
- if size, ok := c.computedSizes[e.ID()]; ok {
+ if size, ok := c.computedSizes[e.GetId()]; ok {
derivedSize = &size
}
return &astNode{
diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go
index 8b3bf0b8b..c2b96498d 100644
--- a/vendor/github.com/google/cel-go/checker/errors.go
+++ b/vendor/github.com/google/cel-go/checker/errors.go
@@ -15,9 +15,13 @@
package checker
import (
+ "reflect"
+
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// typeErrors is a specialization of Errors.
@@ -30,9 +34,9 @@ func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string,
name, FormatCELType(field), FormatCELType(value))
}
-func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) {
+func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) {
e.errs.ReportErrorAtID(id, l,
- "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
+ "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
}
func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
@@ -45,7 +49,7 @@ func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *type
FormatCELType(t))
}
-func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) {
+func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) {
e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
}
@@ -57,9 +61,9 @@ func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName strin
e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
}
-func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) {
+func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) {
e.errs.ReportErrorAtID(id, l,
- "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
+ "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
}
func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
@@ -83,6 +87,6 @@ func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typ
e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
}
-func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) {
- e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName)
+func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) {
+ e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex))
}
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
index 7a3984f02..15cba06ee 100644
--- a/vendor/github.com/google/cel-go/checker/printer.go
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -17,40 +17,40 @@ package checker
import (
"sort"
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/debug"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
type semanticAdorner struct {
- checked *ast.AST
+ checks *exprpb.CheckedExpr
}
var _ debug.Adorner = &semanticAdorner{}
func (a *semanticAdorner) GetMetadata(elem any) string {
result := ""
- e, isExpr := elem.(ast.Expr)
+ e, isExpr := elem.(*exprpb.Expr)
if !isExpr {
return result
}
- t := a.checked.TypeMap()[e.ID()]
+ t := a.checks.TypeMap[e.GetId()]
if t != nil {
result += "~"
- result += FormatCELType(t)
+ result += FormatCheckedType(t)
}
- switch e.Kind() {
- case ast.IdentKind,
- ast.CallKind,
- ast.ListKind,
- ast.StructKind,
- ast.SelectKind:
- if ref, found := a.checked.ReferenceMap()[e.ID()]; found {
- if len(ref.OverloadIDs) == 0 {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr,
+ *exprpb.Expr_CallExpr,
+ *exprpb.Expr_StructExpr,
+ *exprpb.Expr_SelectExpr:
+ if ref, found := a.checks.ReferenceMap[e.GetId()]; found {
+ if len(ref.GetOverloadId()) == 0 {
result += "^" + ref.Name
} else {
- sort.Strings(ref.OverloadIDs)
- for i, overload := range ref.OverloadIDs {
+ sort.Strings(ref.GetOverloadId())
+ for i, overload := range ref.GetOverloadId() {
if i == 0 {
result += "^"
} else {
@@ -68,7 +68,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string {
// Print returns a string representation of the Expr message,
// annotated with types from the CheckedExpr. The Expr must
// be a sub-expression embedded in the CheckedExpr.
-func Print(e ast.Expr, checked *ast.AST) string {
- a := &semanticAdorner{checked: checked}
+func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string {
+ a := &semanticAdorner{checks: checks}
return debug.ToAdornedDebugString(e, a)
}
diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
index c92a0f179..7269cdff5 100644
--- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
@@ -5,9 +5,7 @@ package(
"//cel:__subpackages__",
"//checker:__subpackages__",
"//common:__subpackages__",
- "//ext:__subpackages__",
"//interpreter:__subpackages__",
- "//parser:__subpackages__",
],
licenses = ["notice"], # Apache 2.0
)
@@ -16,14 +14,10 @@ go_library(
name = "go_default_library",
srcs = [
"ast.go",
- "conversion.go",
"expr.go",
- "factory.go",
- "navigable.go",
],
importpath = "github.com/google/cel-go/common/ast",
deps = [
- "//common:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
@@ -35,9 +29,7 @@ go_test(
name = "go_default_test",
srcs = [
"ast_test.go",
- "conversion_test.go",
"expr_test.go",
- "navigable_test.go",
],
embed = [
":go_default_library",
@@ -56,6 +48,5 @@ go_test(
"//test/proto3pb:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//encoding/prototext:go_default_library",
],
)
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go
index 4feddaa3a..b3c150793 100644
--- a/vendor/github.com/google/cel-go/common/ast/ast.go
+++ b/vendor/github.com/google/cel-go/common/ast/ast.go
@@ -16,355 +16,74 @@
package ast
import (
- "github.com/google/cel-go/common"
+ "fmt"
+
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
-)
-
-// AST contains a protobuf expression and source info along with CEL-native type and reference information.
-type AST struct {
- expr Expr
- sourceInfo *SourceInfo
- typeMap map[int64]*types.Type
- refMap map[int64]*ReferenceInfo
-}
-
-// Expr returns the root ast.Expr value in the AST.
-func (a *AST) Expr() Expr {
- if a == nil {
- return nilExpr
- }
- return a.expr
-}
-// SourceInfo returns the source metadata associated with the parse / type-check passes.
-func (a *AST) SourceInfo() *SourceInfo {
- if a == nil {
- return nil
- }
- return a.sourceInfo
-}
+ structpb "google.golang.org/protobuf/types/known/structpb"
-// GetType returns the type for the expression at the given id, if one exists, else types.DynType.
-func (a *AST) GetType(id int64) *types.Type {
- if t, found := a.TypeMap()[id]; found {
- return t
- }
- return types.DynType
-}
-
-// SetType sets the type of the expression node at the given id.
-func (a *AST) SetType(id int64, t *types.Type) {
- if a == nil {
- return
- }
- a.typeMap[id] = t
-}
-
-// TypeMap returns the map of expression ids to type-checked types.
-//
-// If the AST is not type-checked, the map will be empty.
-func (a *AST) TypeMap() map[int64]*types.Type {
- if a == nil {
- return map[int64]*types.Type{}
- }
- return a.typeMap
-}
-
-// GetOverloadIDs returns the set of overload function names for a given expression id.
-//
-// If the expression id is not a function call, or the AST is not type-checked, the result will be empty.
-func (a *AST) GetOverloadIDs(id int64) []string {
- if ref, found := a.ReferenceMap()[id]; found {
- return ref.OverloadIDs
- }
- return []string{}
-}
-
-// ReferenceMap returns the map of expression id to identifier, constant, and function references.
-func (a *AST) ReferenceMap() map[int64]*ReferenceInfo {
- if a == nil {
- return map[int64]*ReferenceInfo{}
- }
- return a.refMap
-}
-
-// SetReference adds a reference to the checked AST type map.
-func (a *AST) SetReference(id int64, r *ReferenceInfo) {
- if a == nil {
- return
- }
- a.refMap[id] = r
-}
-
-// IsChecked returns whether the AST is type-checked.
-func (a *AST) IsChecked() bool {
- return a != nil && len(a.TypeMap()) > 0
-}
-
-// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value.
-func NewAST(e Expr, sourceInfo *SourceInfo) *AST {
- if e == nil {
- e = nilExpr
- }
- return &AST{
- expr: e,
- sourceInfo: sourceInfo,
- typeMap: make(map[int64]*types.Type),
- refMap: make(map[int64]*ReferenceInfo),
- }
-}
-
-// NewCheckedAST wraps an parsed AST and augments it with type and reference metadata.
-func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST {
- return &AST{
- expr: parsed.Expr(),
- sourceInfo: parsed.SourceInfo(),
- typeMap: typeMap,
- refMap: refMap,
- }
-}
-
-// Copy creates a deep copy of the Expr and SourceInfo values in the input AST.
-//
-// Copies of the Expr value are generated using an internal default ExprFactory.
-func Copy(a *AST) *AST {
- if a == nil {
- return nil
- }
- e := defaultFactory.CopyExpr(a.expr)
- if !a.IsChecked() {
- return NewAST(e, CopySourceInfo(a.SourceInfo()))
- }
- typesCopy := make(map[int64]*types.Type, len(a.typeMap))
- for id, t := range a.typeMap {
- typesCopy[id] = t
- }
- refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap))
- for id, r := range a.refMap {
- refsCopy[id] = r
- }
- return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy)
-}
-
-// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value.
-func MaxID(a *AST) int64 {
- visitor := &maxIDVisitor{maxID: 1}
- PostOrderVisit(a.Expr(), visitor)
- return visitor.maxID + 1
-}
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
-// NewSourceInfo creates a simple SourceInfo object from an input common.Source value.
-func NewSourceInfo(src common.Source) *SourceInfo {
- var lineOffsets []int32
- var desc string
- baseLine := int32(0)
- baseCol := int32(0)
- if src != nil {
- desc = src.Description()
- lineOffsets = src.LineOffsets()
- // Determine whether the source metadata should be computed relative
- // to a base line and column value. This can be determined by requesting
- // the location for offset 0 from the source object.
- if loc, found := src.OffsetLocation(0); found {
- baseLine = int32(loc.Line()) - 1
- baseCol = int32(loc.Column())
+// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information.
+type CheckedAST struct {
+ Expr *exprpb.Expr
+ SourceInfo *exprpb.SourceInfo
+ TypeMap map[int64]*types.Type
+ ReferenceMap map[int64]*ReferenceInfo
+}
+
+// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobuf.
+func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) {
+ refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap))
+ for id, ref := range ast.ReferenceMap {
+ r, err := ReferenceInfoToReferenceExpr(ref)
+ if err != nil {
+ return nil, err
}
+ refMap[id] = r
}
- return &SourceInfo{
- desc: desc,
- lines: lineOffsets,
- baseLine: baseLine,
- baseCol: baseCol,
- offsetRanges: make(map[int64]OffsetRange),
- macroCalls: make(map[int64]Expr),
- }
-}
-
-// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo.
-//
-// Copies of macro Expr values are generated using an internal default ExprFactory.
-func CopySourceInfo(info *SourceInfo) *SourceInfo {
- if info == nil {
- return nil
- }
- rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges))
- for id, off := range info.offsetRanges {
- rangesCopy[id] = off
- }
- callsCopy := make(map[int64]Expr, len(info.macroCalls))
- for id, call := range info.macroCalls {
- callsCopy[id] = defaultFactory.CopyExpr(call)
- }
- return &SourceInfo{
- syntax: info.syntax,
- desc: info.desc,
- lines: info.lines,
- baseLine: info.baseLine,
- baseCol: info.baseCol,
- offsetRanges: rangesCopy,
- macroCalls: callsCopy,
- }
-}
-
-// SourceInfo records basic information about the expression as a textual input and
-// as a parsed expression value.
-type SourceInfo struct {
- syntax string
- desc string
- lines []int32
- baseLine int32
- baseCol int32
- offsetRanges map[int64]OffsetRange
- macroCalls map[int64]Expr
-}
-
-// SyntaxVersion returns the syntax version associated with the text expression.
-func (s *SourceInfo) SyntaxVersion() string {
- if s == nil {
- return ""
- }
- return s.syntax
-}
-
-// Description provides information about where the expression came from.
-func (s *SourceInfo) Description() string {
- if s == nil {
- return ""
- }
- return s.desc
-}
-
-// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear.
-func (s *SourceInfo) LineOffsets() []int32 {
- if s == nil {
- return []int32{}
- }
- return s.lines
-}
-
-// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression
-// node where the macro was inserted into the AST, and the ast.Expr value represents the original call
-// signature which was replaced.
-func (s *SourceInfo) MacroCalls() map[int64]Expr {
- if s == nil {
- return map[int64]Expr{}
- }
- return s.macroCalls
-}
-
-// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via
-// a macro replacement.
-//
-// Note, parsing options must be enabled to track macro calls before this method will return a value.
-func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) {
- e, found := s.MacroCalls()[id]
- return e, found
-}
-
-// SetMacroCall records a macro call at a specific location.
-func (s *SourceInfo) SetMacroCall(id int64, e Expr) {
- if s != nil {
- s.macroCalls[id] = e
- }
-}
-
-// ClearMacroCall removes the macro call at the given expression id.
-func (s *SourceInfo) ClearMacroCall(id int64) {
- if s != nil {
- delete(s.macroCalls, id)
- }
-}
-
-// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either:
-// the start and end position in the input stream where the expression occurs, or the start position
-// only. If the range only captures start position, the stop position of the range will be equal to
-// the start.
-func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange {
- if s == nil {
- return map[int64]OffsetRange{}
- }
- return s.offsetRanges
-}
-
-// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists.
-func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) {
- if s == nil {
- return OffsetRange{}, false
- }
- o, found := s.offsetRanges[id]
- return o, found
-}
-
-// SetOffsetRange sets the OffsetRange for the given expression id.
-func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) {
- if s == nil {
- return
- }
- s.offsetRanges[id] = o
-}
-
-// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character
-// of the expression node at the id.
-func (s *SourceInfo) GetStartLocation(id int64) common.Location {
- if o, found := s.GetOffsetRange(id); found {
- line := 1
- col := int(o.Start)
- for _, lineOffset := range s.LineOffsets() {
- if lineOffset < o.Start {
- line++
- col = int(o.Start - lineOffset)
- } else {
- break
- }
+ typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap))
+ for id, typ := range ast.TypeMap {
+ t, err := types.TypeToExprType(typ)
+ if err != nil {
+ return nil, err
}
- return common.NewLocation(line, col)
- }
- return common.NoLocation
-}
-
-// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for
-// the expression node at the given id.
-//
-// If the SourceInfo was generated from a serialized protobuf representation, the stop location will
-// be identical to the start location for the expression.
-func (s *SourceInfo) GetStopLocation(id int64) common.Location {
- if o, found := s.GetOffsetRange(id); found {
- line := 1
- col := int(o.Stop)
- for _, lineOffset := range s.LineOffsets() {
- if lineOffset < o.Stop {
- line++
- col = int(o.Stop - lineOffset)
- } else {
- break
- }
+ typeMap[id] = t
+ }
+ return &exprpb.CheckedExpr{
+ Expr: ast.Expr,
+ SourceInfo: ast.SourceInfo,
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
+}
+
+// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance.
+func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) {
+ refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
+ for id, ref := range checked.GetReferenceMap() {
+ r, err := ReferenceExprToReferenceInfo(ref)
+ if err != nil {
+ return nil, err
}
- return common.NewLocation(line, col)
- }
- return common.NoLocation
-}
-
-// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column.
-func (s *SourceInfo) ComputeOffset(line, col int32) int32 {
- if s != nil {
- line = s.baseLine + line
- col = s.baseCol + col
- }
- if line == 1 {
- return col
+ refMap[id] = r
}
- if line < 1 || line > int32(len(s.LineOffsets())) {
- return -1
+ typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
+ for id, typ := range checked.GetTypeMap() {
+ t, err := types.ExprTypeToType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
}
- offset := s.LineOffsets()[line-2]
- return offset + col
-}
-
-// OffsetRange captures the start and stop positions of a section of text in the input expression.
-type OffsetRange struct {
- Start int32
- Stop int32
+ return &CheckedAST{
+ Expr: checked.GetExpr(),
+ SourceInfo: checked.GetSourceInfo(),
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
}
// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to
@@ -430,21 +149,78 @@ func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
return true
}
-type maxIDVisitor struct {
- maxID int64
- *baseVisitor
+// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
+func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) {
+ c, err := ValToConstant(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Reference{
+ Name: info.Name,
+ OverloadId: info.OverloadIDs,
+ Value: c,
+ }, nil
}
-// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed.
-func (v *maxIDVisitor) VisitExpr(e Expr) {
- if v.maxID < e.ID() {
- v.maxID = e.ID()
+// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
+func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
+ v, err := ConstantToVal(ref.GetValue())
+ if err != nil {
+ return nil, err
}
+ return &ReferenceInfo{
+ Name: ref.GetName(),
+ OverloadIDs: ref.GetOverloadId(),
+ Value: v,
+ }, nil
}
-// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed.
-func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) {
- if v.maxID < e.ID() {
- v.maxID = e.ID()
- }
+// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
+//
+// Only simple scalar types are supported by this method.
+func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
+ if v == nil {
+ return nil, nil
+ }
+ switch v.Type() {
+ case types.BoolType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
+ case types.BytesType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
+ case types.IntType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
+ case types.NullType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
+ case types.StringType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
+ case types.UintType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
+}
+
+// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
+func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
+ if c == nil {
+ return nil, nil
+ }
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ return types.Bool(c.GetBoolValue()), nil
+ case *exprpb.Constant_BytesValue:
+ return types.Bytes(c.GetBytesValue()), nil
+ case *exprpb.Constant_DoubleValue:
+ return types.Double(c.GetDoubleValue()), nil
+ case *exprpb.Constant_Int64Value:
+ return types.Int(c.GetInt64Value()), nil
+ case *exprpb.Constant_NullValue:
+ return types.NullValue, nil
+ case *exprpb.Constant_StringValue:
+ return types.String(c.GetStringValue()), nil
+ case *exprpb.Constant_Uint64Value:
+ return types.Uint(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
}
diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go
deleted file mode 100644
index 8f2c4bd1e..000000000
--- a/vendor/github.com/google/cel-go/common/ast/conversion.go
+++ /dev/null
@@ -1,632 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ast
-
-import (
- "fmt"
-
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-
- structpb "google.golang.org/protobuf/types/known/structpb"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// ToProto converts an AST to a CheckedExpr protobouf.
-func ToProto(ast *AST) (*exprpb.CheckedExpr, error) {
- refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap()))
- for id, ref := range ast.ReferenceMap() {
- r, err := ReferenceInfoToProto(ref)
- if err != nil {
- return nil, err
- }
- refMap[id] = r
- }
- typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap()))
- for id, typ := range ast.TypeMap() {
- t, err := types.TypeToExprType(typ)
- if err != nil {
- return nil, err
- }
- typeMap[id] = t
- }
- e, err := ExprToProto(ast.Expr())
- if err != nil {
- return nil, err
- }
- info, err := SourceInfoToProto(ast.SourceInfo())
- if err != nil {
- return nil, err
- }
- return &exprpb.CheckedExpr{
- Expr: e,
- SourceInfo: info,
- ReferenceMap: refMap,
- TypeMap: typeMap,
- }, nil
-}
-
-// ToAST converts a CheckedExpr protobuf to an AST instance.
-func ToAST(checked *exprpb.CheckedExpr) (*AST, error) {
- refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
- for id, ref := range checked.GetReferenceMap() {
- r, err := ProtoToReferenceInfo(ref)
- if err != nil {
- return nil, err
- }
- refMap[id] = r
- }
- typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
- for id, typ := range checked.GetTypeMap() {
- t, err := types.ExprTypeToType(typ)
- if err != nil {
- return nil, err
- }
- typeMap[id] = t
- }
- info, err := ProtoToSourceInfo(checked.GetSourceInfo())
- if err != nil {
- return nil, err
- }
- root, err := ProtoToExpr(checked.GetExpr())
- if err != nil {
- return nil, err
- }
- ast := NewCheckedAST(NewAST(root, info), typeMap, refMap)
- return ast, nil
-}
-
-// ProtoToExpr converts a protobuf Expr value to an ast.Expr value.
-func ProtoToExpr(e *exprpb.Expr) (Expr, error) {
- factory := NewExprFactory()
- return exprInternal(factory, e)
-}
-
-// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr
-func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
- factory := NewExprFactory()
- switch e.GetKeyKind().(type) {
- case *exprpb.Expr_CreateStruct_Entry_FieldKey:
- return exprStructField(factory, e.GetId(), e)
- case *exprpb.Expr_CreateStruct_Entry_MapKey:
- return exprMapEntry(factory, e.GetId(), e)
- }
- return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
-}
-
-func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) {
- id := e.GetId()
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_CallExpr:
- return exprCall(factory, id, e.GetCallExpr())
- case *exprpb.Expr_ComprehensionExpr:
- return exprComprehension(factory, id, e.GetComprehensionExpr())
- case *exprpb.Expr_ConstExpr:
- return exprLiteral(factory, id, e.GetConstExpr())
- case *exprpb.Expr_IdentExpr:
- return exprIdent(factory, id, e.GetIdentExpr())
- case *exprpb.Expr_ListExpr:
- return exprList(factory, id, e.GetListExpr())
- case *exprpb.Expr_SelectExpr:
- return exprSelect(factory, id, e.GetSelectExpr())
- case *exprpb.Expr_StructExpr:
- s := e.GetStructExpr()
- if s.GetMessageName() != "" {
- return exprStruct(factory, id, s)
- }
- return exprMap(factory, id, s)
- }
- return factory.NewUnspecifiedExpr(id), nil
-}
-
-func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) {
- var err error
- args := make([]Expr, len(call.GetArgs()))
- for i, a := range call.GetArgs() {
- args[i], err = exprInternal(factory, a)
- if err != nil {
- return nil, err
- }
- }
- if call.GetTarget() == nil {
- return factory.NewCall(id, call.GetFunction(), args...), nil
- }
-
- target, err := exprInternal(factory, call.GetTarget())
- if err != nil {
- return nil, err
- }
- return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil
-}
-
-func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) {
- iterRange, err := exprInternal(factory, comp.GetIterRange())
- if err != nil {
- return nil, err
- }
- accuInit, err := exprInternal(factory, comp.GetAccuInit())
- if err != nil {
- return nil, err
- }
- loopCond, err := exprInternal(factory, comp.GetLoopCondition())
- if err != nil {
- return nil, err
- }
- loopStep, err := exprInternal(factory, comp.GetLoopStep())
- if err != nil {
- return nil, err
- }
- result, err := exprInternal(factory, comp.GetResult())
- if err != nil {
- return nil, err
- }
- return factory.NewComprehension(id,
- iterRange,
- comp.GetIterVar(),
- comp.GetAccuVar(),
- accuInit,
- loopCond,
- loopStep,
- result), nil
-}
-
-func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) {
- val, err := ConstantToVal(c)
- if err != nil {
- return nil, err
- }
- return factory.NewLiteral(id, val), nil
-}
-
-func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) {
- return factory.NewIdent(id, i.GetName()), nil
-}
-
-func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) {
- elems := make([]Expr, len(l.GetElements()))
- for i, e := range l.GetElements() {
- elem, err := exprInternal(factory, e)
- if err != nil {
- return nil, err
- }
- elems[i] = elem
- }
- return factory.NewList(id, elems, l.GetOptionalIndices()), nil
-}
-
-func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
- entries := make([]EntryExpr, len(s.GetEntries()))
- var err error
- for i, entry := range s.GetEntries() {
- entries[i], err = exprMapEntry(factory, entry.GetId(), entry)
- if err != nil {
- return nil, err
- }
- }
- return factory.NewMap(id, entries), nil
-}
-
-func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
- k, err := exprInternal(factory, e.GetMapKey())
- if err != nil {
- return nil, err
- }
- v, err := exprInternal(factory, e.GetValue())
- if err != nil {
- return nil, err
- }
- return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil
-}
-
-func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) {
- op, err := exprInternal(factory, s.GetOperand())
- if err != nil {
- return nil, err
- }
- if s.GetTestOnly() {
- return factory.NewPresenceTest(id, op, s.GetField()), nil
- }
- return factory.NewSelect(id, op, s.GetField()), nil
-}
-
-func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
- fields := make([]EntryExpr, len(s.GetEntries()))
- var err error
- for i, field := range s.GetEntries() {
- fields[i], err = exprStructField(factory, field.GetId(), field)
- if err != nil {
- return nil, err
- }
- }
- return factory.NewStruct(id, s.GetMessageName(), fields), nil
-}
-
-func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
- v, err := exprInternal(factory, f.GetValue())
- if err != nil {
- return nil, err
- }
- return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil
-}
-
-// ExprToProto serializes an ast.Expr value to a protobuf Expr representation.
-func ExprToProto(e Expr) (*exprpb.Expr, error) {
- if e == nil {
- return &exprpb.Expr{}, nil
- }
- switch e.Kind() {
- case CallKind:
- return protoCall(e.ID(), e.AsCall())
- case ComprehensionKind:
- return protoComprehension(e.ID(), e.AsComprehension())
- case IdentKind:
- return protoIdent(e.ID(), e.AsIdent())
- case ListKind:
- return protoList(e.ID(), e.AsList())
- case LiteralKind:
- return protoLiteral(e.ID(), e.AsLiteral())
- case MapKind:
- return protoMap(e.ID(), e.AsMap())
- case SelectKind:
- return protoSelect(e.ID(), e.AsSelect())
- case StructKind:
- return protoStruct(e.ID(), e.AsStruct())
- case UnspecifiedExprKind:
- // Handle the case where a macro reference may be getting translated.
- // A nested macro 'pointer' is a non-zero expression id with no kind set.
- if e.ID() != 0 {
- return &exprpb.Expr{Id: e.ID()}, nil
- }
- return &exprpb.Expr{}, nil
- }
- return nil, fmt.Errorf("unsupported expr kind: %v", e)
-}
-
-// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry
-func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) {
- switch e.Kind() {
- case MapEntryKind:
- return protoMapEntry(e.ID(), e.AsMapEntry())
- case StructFieldKind:
- return protoStructField(e.ID(), e.AsStructField())
- case UnspecifiedEntryExprKind:
- return &exprpb.Expr_CreateStruct_Entry{}, nil
- }
- return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
-}
-
-func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) {
- var err error
- var target *exprpb.Expr
- if call.IsMemberFunction() {
- target, err = ExprToProto(call.Target())
- if err != nil {
- return nil, err
- }
- }
- callArgs := call.Args()
- args := make([]*exprpb.Expr, len(callArgs))
- for i, a := range callArgs {
- args[i], err = ExprToProto(a)
- if err != nil {
- return nil, err
- }
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_CallExpr{
- CallExpr: &exprpb.Expr_Call{
- Function: call.FunctionName(),
- Target: target,
- Args: args,
- },
- },
- }, nil
-}
-
-func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) {
- iterRange, err := ExprToProto(comp.IterRange())
- if err != nil {
- return nil, err
- }
- accuInit, err := ExprToProto(comp.AccuInit())
- if err != nil {
- return nil, err
- }
- loopCond, err := ExprToProto(comp.LoopCondition())
- if err != nil {
- return nil, err
- }
- loopStep, err := ExprToProto(comp.LoopStep())
- if err != nil {
- return nil, err
- }
- result, err := ExprToProto(comp.Result())
- if err != nil {
- return nil, err
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_ComprehensionExpr{
- ComprehensionExpr: &exprpb.Expr_Comprehension{
- IterVar: comp.IterVar(),
- IterRange: iterRange,
- AccuVar: comp.AccuVar(),
- AccuInit: accuInit,
- LoopCondition: loopCond,
- LoopStep: loopStep,
- Result: result,
- },
- },
- }, nil
-}
-
-func protoIdent(id int64, name string) (*exprpb.Expr, error) {
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_IdentExpr{
- IdentExpr: &exprpb.Expr_Ident{
- Name: name,
- },
- },
- }, nil
-}
-
-func protoList(id int64, list ListExpr) (*exprpb.Expr, error) {
- var err error
- elems := make([]*exprpb.Expr, list.Size())
- for i, e := range list.Elements() {
- elems[i], err = ExprToProto(e)
- if err != nil {
- return nil, err
- }
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_ListExpr{
- ListExpr: &exprpb.Expr_CreateList{
- Elements: elems,
- OptionalIndices: list.OptionalIndices(),
- },
- },
- }, nil
-}
-
-func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) {
- c, err := ValToConstant(val)
- if err != nil {
- return nil, err
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_ConstExpr{
- ConstExpr: c,
- },
- }, nil
-}
-
-func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) {
- entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries()))
- var err error
- for i, e := range m.Entries() {
- entries[i], err = EntryExprToProto(e)
- if err != nil {
- return nil, err
- }
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_StructExpr{
- StructExpr: &exprpb.Expr_CreateStruct{
- Entries: entries,
- },
- },
- }, nil
-}
-
-func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) {
- k, err := ExprToProto(e.Key())
- if err != nil {
- return nil, err
- }
- v, err := ExprToProto(e.Value())
- if err != nil {
- return nil, err
- }
- return &exprpb.Expr_CreateStruct_Entry{
- Id: id,
- KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
- MapKey: k,
- },
- Value: v,
- OptionalEntry: e.IsOptional(),
- }, nil
-}
-
-func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) {
- op, err := ExprToProto(s.Operand())
- if err != nil {
- return nil, err
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_SelectExpr{
- SelectExpr: &exprpb.Expr_Select{
- Operand: op,
- Field: s.FieldName(),
- TestOnly: s.IsTestOnly(),
- },
- },
- }, nil
-}
-
-func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) {
- entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields()))
- var err error
- for i, e := range s.Fields() {
- entries[i], err = EntryExprToProto(e)
- if err != nil {
- return nil, err
- }
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_StructExpr{
- StructExpr: &exprpb.Expr_CreateStruct{
- MessageName: s.TypeName(),
- Entries: entries,
- },
- },
- }, nil
-}
-
-func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) {
- v, err := ExprToProto(f.Value())
- if err != nil {
- return nil, err
- }
- return &exprpb.Expr_CreateStruct_Entry{
- Id: id,
- KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{
- FieldKey: f.Name(),
- },
- Value: v,
- OptionalEntry: f.IsOptional(),
- }, nil
-}
-
-// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object.
-func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) {
- if info == nil {
- return &exprpb.SourceInfo{}, nil
- }
- sourceInfo := &exprpb.SourceInfo{
- SyntaxVersion: info.SyntaxVersion(),
- Location: info.Description(),
- LineOffsets: info.LineOffsets(),
- Positions: make(map[int64]int32, len(info.OffsetRanges())),
- MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())),
- }
- for id, offset := range info.OffsetRanges() {
- sourceInfo.Positions[id] = offset.Start
- }
- for id, e := range info.MacroCalls() {
- call, err := ExprToProto(e)
- if err != nil {
- return nil, err
- }
- sourceInfo.MacroCalls[id] = call
- }
- return sourceInfo, nil
-}
-
-// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value.
-func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) {
- sourceInfo := &SourceInfo{
- syntax: info.GetSyntaxVersion(),
- desc: info.GetLocation(),
- lines: info.GetLineOffsets(),
- offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())),
- macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())),
- }
- for id, offset := range info.GetPositions() {
- sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset})
- }
- for id, e := range info.GetMacroCalls() {
- call, err := ProtoToExpr(e)
- if err != nil {
- return nil, err
- }
- sourceInfo.SetMacroCall(id, call)
- }
- return sourceInfo, nil
-}
-
-// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
-func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) {
- c, err := ValToConstant(info.Value)
- if err != nil {
- return nil, err
- }
- return &exprpb.Reference{
- Name: info.Name,
- OverloadId: info.OverloadIDs,
- Value: c,
- }, nil
-}
-
-// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
-func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
- v, err := ConstantToVal(ref.GetValue())
- if err != nil {
- return nil, err
- }
- return &ReferenceInfo{
- Name: ref.GetName(),
- OverloadIDs: ref.GetOverloadId(),
- Value: v,
- }, nil
-}
-
-// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
-//
-// Only simple scalar types are supported by this method.
-func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
- if v == nil {
- return nil, nil
- }
- switch v.Type() {
- case types.BoolType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
- case types.BytesType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
- case types.DoubleType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
- case types.IntType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
- case types.NullType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
- case types.StringType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
- case types.UintType:
- return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
- }
- return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
-}
-
-// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
-func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
- if c == nil {
- return nil, nil
- }
- switch c.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- return types.Bool(c.GetBoolValue()), nil
- case *exprpb.Constant_BytesValue:
- return types.Bytes(c.GetBytesValue()), nil
- case *exprpb.Constant_DoubleValue:
- return types.Double(c.GetDoubleValue()), nil
- case *exprpb.Constant_Int64Value:
- return types.Int(c.GetInt64Value()), nil
- case *exprpb.Constant_NullValue:
- return types.NullValue, nil
- case *exprpb.Constant_StringValue:
- return types.String(c.GetStringValue()), nil
- case *exprpb.Constant_Uint64Value:
- return types.Uint(c.GetUint64Value()), nil
- }
- return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
-}
diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go
index c9d88bbaa..b63884a60 100644
--- a/vendor/github.com/google/cel-go/common/ast/expr.go
+++ b/vendor/github.com/google/cel-go/common/ast/expr.go
@@ -15,61 +15,168 @@
package ast
import (
+ "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// ExprKind represents the expression node kind.
type ExprKind int
const (
- // UnspecifiedExprKind represents an unset expression with no specified properties.
- UnspecifiedExprKind ExprKind = iota
-
- // CallKind represents a function call.
- CallKind
+ // UnspecifiedKind represents an unset expression with no specified properties.
+ UnspecifiedKind ExprKind = iota
- // ComprehensionKind represents a comprehension expression generated by a macro.
- ComprehensionKind
+ // LiteralKind represents a primitive scalar literal.
+ LiteralKind
// IdentKind represents a simple variable, constant, or type identifier.
IdentKind
+ // SelectKind represents a field selection expression.
+ SelectKind
+
+ // CallKind represents a function call.
+ CallKind
+
// ListKind represents a list literal expression.
ListKind
- // LiteralKind represents a primitive scalar literal.
- LiteralKind
-
// MapKind represents a map literal expression.
MapKind
- // SelectKind represents a field selection expression.
- SelectKind
-
// StructKind represents a struct literal expression.
StructKind
+
+ // ComprehensionKind represents a comprehension expression generated by a macro.
+ ComprehensionKind
)
-// Expr represents the base expression node in a CEL abstract syntax tree.
+// NavigateCheckedAST converts a CheckedAST to a NavigableExpr
+func NavigateCheckedAST(ast *CheckedAST) NavigableExpr {
+ return newNavigableExpr(nil, ast.Expr, ast.TypeMap)
+}
+
+// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
//
-// Depending on the `Kind()` value, the Expr may be converted to a concrete expression types
+// This function type should be used with the `Match` and `MatchList` calls.
+type ExprMatcher func(NavigableExpr) bool
+
+// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
+// is comprised of all constant values, such as a simple literal or even list and map literal.
+func ConstantValueMatcher() ExprMatcher {
+ return matchIsConstantValue
+}
+
+// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
+// the specified `kind`.
+func KindMatcher(kind ExprKind) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ return e.Kind() == kind
+ }
+}
+
+// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
+// function name is equal to `funcName`.
+func FunctionMatcher(funcName string) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ if e.Kind() != CallKind {
+ return false
+ }
+ return e.AsCall().FunctionName() == funcName
+ }
+}
+
+// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
+//
+// Such a result would work well with subsequent MatchList calls.
+func AllMatcher() ExprMatcher {
+ return func(NavigableExpr) bool {
+ return true
+ }
+}
+
+// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the
+// descendants which match.
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ return matchListInternal([]NavigableExpr{expr}, matcher, true)
+}
+
+// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
+// subset of NavigableExpr values which match.
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ visit := make([]NavigableExpr, len(exprs))
+ copy(visit, exprs)
+ return matchListInternal(visit, matcher, false)
+}
+
+func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr {
+ var matched []NavigableExpr
+ for len(visit) != 0 {
+ e := visit[0]
+ if matcher(e) {
+ matched = append(matched, e)
+ }
+ if visitDescendants {
+ visit = append(visit[1:], e.Children()...)
+ } else {
+ visit = visit[1:]
+ }
+ }
+ return matched
+}
+
+func matchIsConstantValue(e NavigableExpr) bool {
+ if e.Kind() == LiteralKind {
+ return true
+ }
+ if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
+ for _, child := range e.Children() {
+ if !matchIsConstantValue(child) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// NavigableExpr represents the base navigable expression value.
+//
+// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression type
// as indicated by the `As` methods.
-type Expr interface {
+//
+// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr
+// to the wrong kind should produce a nil value.
+type NavigableExpr interface {
// ID of the expression as it appears in the AST
ID() int64
// Kind of the expression node. See ExprKind for the valid enum values.
Kind() ExprKind
- // AsCall adapts the expr into a CallExpr
+ // Type of the expression node.
+ Type() *types.Type
+
+ // Parent returns the parent expression node, if one exists.
+ Parent() (NavigableExpr, bool)
+
+ // Children returns a list of child expression nodes.
+ Children() []NavigableExpr
+
+ // ToExpr adapts this NavigableExpr to a protobuf representation.
+ ToExpr() *exprpb.Expr
+
+ // AsCall adapts the expr into a NavigableCallExpr
//
// The Kind() must be equal to a CallKind for the conversion to be well-defined.
- AsCall() CallExpr
+ AsCall() NavigableCallExpr
- // AsComprehension adapts the expr into a ComprehensionExpr.
+ // AsComprehension adapts the expr into a NavigableComprehensionExpr.
//
// The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
- AsComprehension() ComprehensionExpr
+ AsComprehension() NavigableComprehensionExpr
// AsIdent adapts the expr into an identifier string.
//
@@ -81,123 +188,67 @@ type Expr interface {
// The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
AsLiteral() ref.Val
- // AsList adapts the expr into a ListExpr.
+ // AsList adapts the expr into a NavigableListExpr.
//
// The Kind() must be equal to a ListKind for the conversion to be well-defined.
- AsList() ListExpr
+ AsList() NavigableListExpr
- // AsMap adapts the expr into a MapExpr.
+ // AsMap adapts the expr into a NavigableMapExpr.
//
// The Kind() must be equal to a MapKind for the conversion to be well-defined.
- AsMap() MapExpr
+ AsMap() NavigableMapExpr
- // AsSelect adapts the expr into a SelectExpr.
+ // AsSelect adapts the expr into a NavigableSelectExpr.
//
// The Kind() must be equal to a SelectKind for the conversion to be well-defined.
- AsSelect() SelectExpr
+ AsSelect() NavigableSelectExpr
- // AsStruct adapts the expr into a StructExpr.
+ // AsStruct adapts the expr into a NavigableStructExpr.
//
// The Kind() must be equal to a StructKind for the conversion to be well-defined.
- AsStruct() StructExpr
-
- // RenumberIDs performs an in-place update of the expression and all of its descendents numeric ids.
- RenumberIDs(IDGenerator)
-
- // SetKindCase replaces the contents of the current expression with the contents of the other.
- //
- // The SetKindCase takes ownership of any expression instances references within the input Expr.
- // A shallow copy is made of the Expr value itself, but not a deep one.
- //
- // This method should only be used during AST rewrites using temporary Expr values.
- SetKindCase(Expr)
-
- // isExpr is a marker interface.
- isExpr()
-}
-
-// EntryExprKind represents the possible EntryExpr kinds.
-type EntryExprKind int
-
-const (
- // UnspecifiedEntryExprKind indicates that the entry expr is not set.
- UnspecifiedEntryExprKind EntryExprKind = iota
-
- // MapEntryKind indicates that the entry is a MapEntry type with key and value expressions.
- MapEntryKind
+ AsStruct() NavigableStructExpr
- // StructFieldKind indicates that the entry is a StructField with a field name and initializer
- // expression.
- StructFieldKind
-)
-
-// EntryExpr represents the base entry expression in a CEL map or struct literal.
-type EntryExpr interface {
- // ID of the entry as it appears in the AST.
- ID() int64
-
- // Kind of the entry expression node. See EntryExprKind for valid enum values.
- Kind() EntryExprKind
-
- // AsMapEntry casts the EntryExpr to a MapEntry.
- //
- // The Kind() must be equal to MapEntryKind for the conversion to be well-defined.
- AsMapEntry() MapEntry
-
- // AsStructField casts the EntryExpr to a StructField
- //
- // The Kind() must be equal to StructFieldKind for the conversion to be well-defined.
- AsStructField() StructField
-
- // RenumberIDs performs an in-place update of the expression and all of its descendents numeric ids.
- RenumberIDs(IDGenerator)
-
- isEntryExpr()
+ // marker interface method
+ isNavigable()
}
-// IDGenerator produces unique ids suitable for tagging expression nodes
-type IDGenerator func(originalID int64) int64
-
-// CallExpr defines an interface for inspecting a function call and its arugments.
-type CallExpr interface {
+// NavigableCallExpr defines an interface for inspecting a function call and its arguments.
+type NavigableCallExpr interface {
// FunctionName returns the name of the function.
FunctionName() string
- // IsMemberFunction returns whether the call has a non-nil target indicating it is a member function
- IsMemberFunction() bool
-
// Target returns the target of the expression if one is present.
- Target() Expr
+ Target() NavigableExpr
// Args returns the list of call arguments, excluding the target.
- Args() []Expr
+ Args() []NavigableExpr
+
+ // ReturnType returns the result type of the call.
+ ReturnType() *types.Type
// marker interface method
- isExpr()
+ isNavigable()
}
-// ListExpr defines an interface for inspecting a list literal expression.
-type ListExpr interface {
+// NavigableListExpr defines an interface for inspecting a list literal expression.
+type NavigableListExpr interface {
// Elements returns the list elements as navigable expressions.
- Elements() []Expr
+ Elements() []NavigableExpr
// OptionalIndicies returns the list of optional indices in the list literal.
OptionalIndices() []int32
- // IsOptional indicates whether the given element index is optional.
- IsOptional(int32) bool
-
// Size returns the number of elements in the list.
Size() int
// marker interface method
- isExpr()
+ isNavigable()
}
-// SelectExpr defines an interface for inspecting a select expression.
-type SelectExpr interface {
+// NavigableSelectExpr defines an interface for inspecting a select expression.
+type NavigableSelectExpr interface {
// Operand returns the selection operand expression.
- Operand() Expr
+ Operand() NavigableExpr
// FieldName returns the field name being selected from the operand.
FieldName() string
@@ -206,67 +257,67 @@ type SelectExpr interface {
IsTestOnly() bool
// marker interface method
- isExpr()
+ isNavigable()
}
-// MapExpr defines an interface for inspecting a map expression.
-type MapExpr interface {
- // Entries returns the map key value pairs as EntryExpr values.
- Entries() []EntryExpr
+// NavigableMapExpr defines an interface for inspecting a map expression.
+type NavigableMapExpr interface {
+ // Entries returns the map key value pairs as NavigableEntry values.
+ Entries() []NavigableEntry
// Size returns the number of entries in the map.
Size() int
// marker interface method
- isExpr()
+ isNavigable()
}
-// MapEntry defines an interface for inspecting a map entry.
-type MapEntry interface {
+// NavigableEntry defines an interface for inspecting a map entry.
+type NavigableEntry interface {
// Key returns the map entry key expression.
- Key() Expr
+ Key() NavigableExpr
// Value returns the map entry value expression.
- Value() Expr
+ Value() NavigableExpr
// IsOptional returns whether the entry is optional.
IsOptional() bool
// marker interface method
- isEntryExpr()
+ isNavigable()
}
-// StructExpr defines an interface for inspecting a struct and its field initializers.
-type StructExpr interface {
+// NavigableStructExpr defines an interface for inspecting a struct and its field initializers.
+type NavigableStructExpr interface {
// TypeName returns the struct type name.
TypeName() string
- // Fields returns the set of field initializers in the struct expression as EntryExpr values.
- Fields() []EntryExpr
+ // Fields returns the set of field initializers in the struct expression as NavigableField values.
+ Fields() []NavigableField
// marker interface method
- isExpr()
+ isNavigable()
}
-// StructField defines an interface for inspecting a struct field initialization.
-type StructField interface {
- // Name returns the name of the field.
- Name() string
+// NavigableField defines an interface for inspecting a struct field initialization.
+type NavigableField interface {
+ // FieldName returns the name of the field.
+ FieldName() string
// Value returns the field initialization expression.
- Value() Expr
+ Value() NavigableExpr
// IsOptional returns whether the field is optional.
IsOptional() bool
// marker interface method
- isEntryExpr()
+ isNavigable()
}
-// ComprehensionExpr defines an interface for inspecting a comprehension expression.
-type ComprehensionExpr interface {
+// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression.
+type NavigableComprehensionExpr interface {
// IterRange returns the iteration range expression.
- IterRange() Expr
+ IterRange() NavigableExpr
// IterVar returns the iteration variable name.
IterVar() string
@@ -275,586 +326,384 @@ type ComprehensionExpr interface {
AccuVar() string
// AccuInit returns the accumulation variable initialization expression.
- AccuInit() Expr
+ AccuInit() NavigableExpr
// LoopCondition returns the loop condition expression.
- LoopCondition() Expr
+ LoopCondition() NavigableExpr
// LoopStep returns the loop step expression.
- LoopStep() Expr
+ LoopStep() NavigableExpr
// Result returns the comprehension result expression.
- Result() Expr
+ Result() NavigableExpr
// marker interface method
- isExpr()
+ isNavigable()
}
-var _ Expr = &expr{}
-
-type expr struct {
- id int64
- exprKindCase
-}
-
-type exprKindCase interface {
- Kind() ExprKind
-
- renumberIDs(IDGenerator)
-
- isExpr()
-}
-
-func (e *expr) ID() int64 {
- if e == nil {
- return 0
+func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr {
+ kind, factory := kindOf(expr)
+ nav := &navigableExprImpl{
+ parent: parent,
+ kind: kind,
+ expr: expr,
+ typeMap: typeMap,
+ createChildren: factory,
}
- return e.id
+ return nav
}
-func (e *expr) Kind() ExprKind {
- if e == nil || e.exprKindCase == nil {
- return UnspecifiedExprKind
- }
- return e.exprKindCase.Kind()
+type navigableExprImpl struct {
+ parent NavigableExpr
+ kind ExprKind
+ expr *exprpb.Expr
+ typeMap map[int64]*types.Type
+ createChildren childFactory
}
-func (e *expr) AsCall() CallExpr {
- if e.Kind() != CallKind {
- return nilCall
- }
- return e.exprKindCase.(CallExpr)
+func (nav *navigableExprImpl) ID() int64 {
+ return nav.ToExpr().GetId()
}
-func (e *expr) AsComprehension() ComprehensionExpr {
- if e.Kind() != ComprehensionKind {
- return nilCompre
- }
- return e.exprKindCase.(ComprehensionExpr)
+func (nav *navigableExprImpl) Kind() ExprKind {
+ return nav.kind
}
-func (e *expr) AsIdent() string {
- if e.Kind() != IdentKind {
- return ""
+func (nav *navigableExprImpl) Type() *types.Type {
+ if t, found := nav.typeMap[nav.ID()]; found {
+ return t
}
- return string(e.exprKindCase.(baseIdentExpr))
+ return types.DynType
}
-func (e *expr) AsLiteral() ref.Val {
- if e.Kind() != LiteralKind {
- return nil
+func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
+ if nav.parent != nil {
+ return nav.parent, true
}
- return e.exprKindCase.(*baseLiteral).Val
+ return nil, false
}
-func (e *expr) AsList() ListExpr {
- if e.Kind() != ListKind {
- return nilList
- }
- return e.exprKindCase.(ListExpr)
+func (nav *navigableExprImpl) Children() []NavigableExpr {
+ return nav.createChildren(nav)
}
-func (e *expr) AsMap() MapExpr {
- if e.Kind() != MapKind {
- return nilMap
- }
- return e.exprKindCase.(MapExpr)
+func (nav *navigableExprImpl) ToExpr() *exprpb.Expr {
+ return nav.expr
}
-func (e *expr) AsSelect() SelectExpr {
- if e.Kind() != SelectKind {
- return nilSel
- }
- return e.exprKindCase.(SelectExpr)
+func (nav *navigableExprImpl) AsCall() NavigableCallExpr {
+ return navigableCallImpl{navigableExprImpl: nav}
}
-func (e *expr) AsStruct() StructExpr {
- if e.Kind() != StructKind {
- return nilStruct
- }
- return e.exprKindCase.(StructExpr)
+func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr {
+ return navigableComprehensionImpl{navigableExprImpl: nav}
}
-func (e *expr) SetKindCase(other Expr) {
- if e == nil {
- return
- }
- if other == nil {
- e.exprKindCase = nil
- return
- }
- switch other.Kind() {
- case CallKind:
- c := other.AsCall()
- e.exprKindCase = &baseCallExpr{
- function: c.FunctionName(),
- target: c.Target(),
- args: c.Args(),
- isMember: c.IsMemberFunction(),
- }
- case ComprehensionKind:
- c := other.AsComprehension()
- e.exprKindCase = &baseComprehensionExpr{
- iterRange: c.IterRange(),
- iterVar: c.IterVar(),
- accuVar: c.AccuVar(),
- accuInit: c.AccuInit(),
- loopCond: c.LoopCondition(),
- loopStep: c.LoopStep(),
- result: c.Result(),
- }
- case IdentKind:
- e.exprKindCase = baseIdentExpr(other.AsIdent())
- case ListKind:
- l := other.AsList()
- optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices()))
- for _, idx := range l.OptionalIndices() {
- optIndexMap[idx] = struct{}{}
- }
- e.exprKindCase = &baseListExpr{
- elements: l.Elements(),
- optIndices: l.OptionalIndices(),
- optIndexMap: optIndexMap,
- }
- case LiteralKind:
- e.exprKindCase = &baseLiteral{Val: other.AsLiteral()}
- case MapKind:
- e.exprKindCase = &baseMapExpr{
- entries: other.AsMap().Entries(),
- }
- case SelectKind:
- s := other.AsSelect()
- e.exprKindCase = &baseSelectExpr{
- operand: s.Operand(),
- field: s.FieldName(),
- testOnly: s.IsTestOnly(),
- }
- case StructKind:
- s := other.AsStruct()
- e.exprKindCase = &baseStructExpr{
- typeName: s.TypeName(),
- fields: s.Fields(),
- }
- case UnspecifiedExprKind:
- e.exprKindCase = nil
- }
+func (nav *navigableExprImpl) AsIdent() string {
+ return nav.ToExpr().GetIdentExpr().GetName()
}
-func (e *expr) RenumberIDs(idGen IDGenerator) {
- if e == nil {
- return
+func (nav *navigableExprImpl) AsLiteral() ref.Val {
+ if nav.Kind() != LiteralKind {
+ return nil
}
- e.id = idGen(e.id)
- if e.exprKindCase != nil {
- e.exprKindCase.renumberIDs(idGen)
+ val, err := ConstantToVal(nav.ToExpr().GetConstExpr())
+ if err != nil {
+ panic(err)
}
+ return val
}
-type baseCallExpr struct {
- function string
- target Expr
- args []Expr
- isMember bool
+func (nav *navigableExprImpl) AsList() NavigableListExpr {
+ return navigableListImpl{navigableExprImpl: nav}
}
-func (*baseCallExpr) Kind() ExprKind {
- return CallKind
+func (nav *navigableExprImpl) AsMap() NavigableMapExpr {
+ return navigableMapImpl{navigableExprImpl: nav}
}
-func (e *baseCallExpr) FunctionName() string {
- if e == nil {
- return ""
- }
- return e.function
+func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr {
+ return navigableSelectImpl{navigableExprImpl: nav}
}
-func (e *baseCallExpr) IsMemberFunction() bool {
- if e == nil {
- return false
- }
- return e.isMember
+func (nav *navigableExprImpl) AsStruct() NavigableStructExpr {
+ return navigableStructImpl{navigableExprImpl: nav}
}
-func (e *baseCallExpr) Target() Expr {
- if e == nil || !e.IsMemberFunction() {
- return nilExpr
- }
- return e.target
+func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr {
+ return newNavigableExpr(nav, e, nav.typeMap)
}
-func (e *baseCallExpr) Args() []Expr {
- if e == nil {
- return []Expr{}
- }
- return e.args
-}
+func (nav *navigableExprImpl) isNavigable() {}
-func (e *baseCallExpr) renumberIDs(idGen IDGenerator) {
- if e.IsMemberFunction() {
- e.Target().RenumberIDs(idGen)
- }
- for _, arg := range e.Args() {
- arg.RenumberIDs(idGen)
- }
+type navigableCallImpl struct {
+ *navigableExprImpl
}
-func (*baseCallExpr) isExpr() {}
-
-var _ ComprehensionExpr = &baseComprehensionExpr{}
-
-type baseComprehensionExpr struct {
- iterRange Expr
- iterVar string
- accuVar string
- accuInit Expr
- loopCond Expr
- loopStep Expr
- result Expr
+func (call navigableCallImpl) FunctionName() string {
+ return call.ToExpr().GetCallExpr().GetFunction()
}
-func (*baseComprehensionExpr) Kind() ExprKind {
- return ComprehensionKind
+func (call navigableCallImpl) Target() NavigableExpr {
+ t := call.ToExpr().GetCallExpr().GetTarget()
+ if t != nil {
+ return call.createChild(t)
+ }
+ return nil
}
-func (e *baseComprehensionExpr) IterRange() Expr {
- if e == nil {
- return nilExpr
+func (call navigableCallImpl) Args() []NavigableExpr {
+ args := call.ToExpr().GetCallExpr().GetArgs()
+ navArgs := make([]NavigableExpr, len(args))
+ for i, a := range args {
+ navArgs[i] = call.createChild(a)
}
- return e.iterRange
+ return navArgs
}
-func (e *baseComprehensionExpr) IterVar() string {
- return e.iterVar
+func (call navigableCallImpl) ReturnType() *types.Type {
+ return call.Type()
}
-func (e *baseComprehensionExpr) AccuVar() string {
- return e.accuVar
+type navigableComprehensionImpl struct {
+ *navigableExprImpl
}
-func (e *baseComprehensionExpr) AccuInit() Expr {
- if e == nil {
- return nilExpr
- }
- return e.accuInit
+func (comp navigableComprehensionImpl) IterRange() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange())
}
-func (e *baseComprehensionExpr) LoopCondition() Expr {
- if e == nil {
- return nilExpr
- }
- return e.loopCond
+func (comp navigableComprehensionImpl) IterVar() string {
+ return comp.ToExpr().GetComprehensionExpr().GetIterVar()
}
-func (e *baseComprehensionExpr) LoopStep() Expr {
- if e == nil {
- return nilExpr
- }
- return e.loopStep
+func (comp navigableComprehensionImpl) AccuVar() string {
+ return comp.ToExpr().GetComprehensionExpr().GetAccuVar()
}
-func (e *baseComprehensionExpr) Result() Expr {
- if e == nil {
- return nilExpr
- }
- return e.result
+func (comp navigableComprehensionImpl) AccuInit() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit())
}
-func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) {
- e.IterRange().RenumberIDs(idGen)
- e.AccuInit().RenumberIDs(idGen)
- e.LoopCondition().RenumberIDs(idGen)
- e.LoopStep().RenumberIDs(idGen)
- e.Result().RenumberIDs(idGen)
+func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition())
}
-func (*baseComprehensionExpr) isExpr() {}
-
-var _ exprKindCase = baseIdentExpr("")
-
-type baseIdentExpr string
-
-func (baseIdentExpr) Kind() ExprKind {
- return IdentKind
+func (comp navigableComprehensionImpl) LoopStep() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep())
}
-func (e baseIdentExpr) renumberIDs(IDGenerator) {}
-
-func (baseIdentExpr) isExpr() {}
-
-var _ exprKindCase = &baseLiteral{}
-var _ ref.Val = &baseLiteral{}
-
-type baseLiteral struct {
- ref.Val
+func (comp navigableComprehensionImpl) Result() NavigableExpr {
+ return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult())
}
-func (*baseLiteral) Kind() ExprKind {
- return LiteralKind
+type navigableListImpl struct {
+ *navigableExprImpl
}
-func (l *baseLiteral) renumberIDs(IDGenerator) {}
-
-func (*baseLiteral) isExpr() {}
-
-var _ ListExpr = &baseListExpr{}
-
-type baseListExpr struct {
- elements []Expr
- optIndices []int32
- optIndexMap map[int32]struct{}
+func (l navigableListImpl) Elements() []NavigableExpr {
+ return l.Children()
}
-func (*baseListExpr) Kind() ExprKind {
- return ListKind
+func (l navigableListImpl) OptionalIndices() []int32 {
+ return l.ToExpr().GetListExpr().GetOptionalIndices()
}
-func (e *baseListExpr) Elements() []Expr {
- if e == nil {
- return []Expr{}
- }
- return e.elements
+func (l navigableListImpl) Size() int {
+ return len(l.ToExpr().GetListExpr().GetElements())
}
-func (e *baseListExpr) IsOptional(index int32) bool {
- _, found := e.optIndexMap[index]
- return found
+type navigableMapImpl struct {
+ *navigableExprImpl
}
-func (e *baseListExpr) OptionalIndices() []int32 {
- if e == nil {
- return []int32{}
+func (m navigableMapImpl) Entries() []NavigableEntry {
+ mapExpr := m.ToExpr().GetStructExpr()
+ entries := make([]NavigableEntry, len(mapExpr.GetEntries()))
+ for i, e := range mapExpr.GetEntries() {
+ entries[i] = navigableEntryImpl{
+ key: m.createChild(e.GetMapKey()),
+ val: m.createChild(e.GetValue()),
+ isOpt: e.GetOptionalEntry(),
+ }
}
- return e.optIndices
+ return entries
}
-func (e *baseListExpr) Size() int {
- return len(e.Elements())
+func (m navigableMapImpl) Size() int {
+ return len(m.ToExpr().GetStructExpr().GetEntries())
}
-func (e *baseListExpr) renumberIDs(idGen IDGenerator) {
- for _, elem := range e.Elements() {
- elem.RenumberIDs(idGen)
- }
+type navigableEntryImpl struct {
+ key NavigableExpr
+ val NavigableExpr
+ isOpt bool
}
-func (*baseListExpr) isExpr() {}
-
-type baseMapExpr struct {
- entries []EntryExpr
+func (e navigableEntryImpl) Key() NavigableExpr {
+ return e.key
}
-func (*baseMapExpr) Kind() ExprKind {
- return MapKind
+func (e navigableEntryImpl) Value() NavigableExpr {
+ return e.val
}
-func (e *baseMapExpr) Entries() []EntryExpr {
- if e == nil {
- return []EntryExpr{}
- }
- return e.entries
+func (e navigableEntryImpl) IsOptional() bool {
+ return e.isOpt
}
-func (e *baseMapExpr) Size() int {
- return len(e.Entries())
-}
+func (e navigableEntryImpl) isNavigable() {}
-func (e *baseMapExpr) renumberIDs(idGen IDGenerator) {
- for _, entry := range e.Entries() {
- entry.RenumberIDs(idGen)
- }
+type navigableSelectImpl struct {
+ *navigableExprImpl
}
-func (*baseMapExpr) isExpr() {}
+func (sel navigableSelectImpl) FieldName() string {
+ return sel.ToExpr().GetSelectExpr().GetField()
+}
-type baseSelectExpr struct {
- operand Expr
- field string
- testOnly bool
+func (sel navigableSelectImpl) IsTestOnly() bool {
+ return sel.ToExpr().GetSelectExpr().GetTestOnly()
}
-func (*baseSelectExpr) Kind() ExprKind {
- return SelectKind
+func (sel navigableSelectImpl) Operand() NavigableExpr {
+ return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand())
}
-func (e *baseSelectExpr) Operand() Expr {
- if e == nil || e.operand == nil {
- return nilExpr
- }
- return e.operand
+type navigableStructImpl struct {
+ *navigableExprImpl
}
-func (e *baseSelectExpr) FieldName() string {
- if e == nil {
- return ""
- }
- return e.field
+func (s navigableStructImpl) TypeName() string {
+ return s.ToExpr().GetStructExpr().GetMessageName()
}
-func (e *baseSelectExpr) IsTestOnly() bool {
- if e == nil {
- return false
+func (s navigableStructImpl) Fields() []NavigableField {
+ fieldInits := s.ToExpr().GetStructExpr().GetEntries()
+ fields := make([]NavigableField, len(fieldInits))
+ for i, f := range fieldInits {
+ fields[i] = navigableFieldImpl{
+ name: f.GetFieldKey(),
+ val: s.createChild(f.GetValue()),
+ isOpt: f.GetOptionalEntry(),
+ }
}
- return e.testOnly
+ return fields
}
-func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) {
- e.Operand().RenumberIDs(idGen)
+type navigableFieldImpl struct {
+ name string
+ val NavigableExpr
+ isOpt bool
}
-func (*baseSelectExpr) isExpr() {}
-
-type baseStructExpr struct {
- typeName string
- fields []EntryExpr
+func (f navigableFieldImpl) FieldName() string {
+ return f.name
}
-func (*baseStructExpr) Kind() ExprKind {
- return StructKind
+func (f navigableFieldImpl) Value() NavigableExpr {
+ return f.val
}
-func (e *baseStructExpr) TypeName() string {
- if e == nil {
- return ""
- }
- return e.typeName
+func (f navigableFieldImpl) IsOptional() bool {
+ return f.isOpt
}
-func (e *baseStructExpr) Fields() []EntryExpr {
- if e == nil {
- return []EntryExpr{}
- }
- return e.fields
-}
+func (f navigableFieldImpl) isNavigable() {}
-func (e *baseStructExpr) renumberIDs(idGen IDGenerator) {
- for _, f := range e.Fields() {
- f.RenumberIDs(idGen)
+func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ return LiteralKind, noopFactory
+ case *exprpb.Expr_IdentExpr:
+ return IdentKind, noopFactory
+ case *exprpb.Expr_SelectExpr:
+ return SelectKind, selectFactory
+ case *exprpb.Expr_CallExpr:
+ return CallKind, callArgFactory
+ case *exprpb.Expr_ListExpr:
+ return ListKind, listElemFactory
+ case *exprpb.Expr_StructExpr:
+ if expr.GetStructExpr().GetMessageName() != "" {
+ return StructKind, structEntryFactory
+ }
+ return MapKind, mapEntryFactory
+ case *exprpb.Expr_ComprehensionExpr:
+ return ComprehensionKind, comprehensionFactory
+ default:
+ return UnspecifiedKind, noopFactory
}
}
-func (*baseStructExpr) isExpr() {}
-
-type entryExprKindCase interface {
- Kind() EntryExprKind
-
- renumberIDs(IDGenerator)
-
- isEntryExpr()
-}
-
-var _ EntryExpr = &entryExpr{}
-
-type entryExpr struct {
- id int64
- entryExprKindCase
-}
+type childFactory func(*navigableExprImpl) []NavigableExpr
-func (e *entryExpr) ID() int64 {
- return e.id
+func noopFactory(*navigableExprImpl) []NavigableExpr {
+ return nil
}
-func (e *entryExpr) AsMapEntry() MapEntry {
- if e.Kind() != MapEntryKind {
- return nilMapEntry
+func selectFactory(nav *navigableExprImpl) []NavigableExpr {
+ return []NavigableExpr{
+ nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()),
}
- return e.entryExprKindCase.(MapEntry)
}
-func (e *entryExpr) AsStructField() StructField {
- if e.Kind() != StructFieldKind {
- return nilStructField
+func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
+ call := nav.ToExpr().GetCallExpr()
+ argCount := len(call.GetArgs())
+ if call.GetTarget() != nil {
+ argCount++
}
- return e.entryExprKindCase.(StructField)
-}
-
-func (e *entryExpr) RenumberIDs(idGen IDGenerator) {
- e.id = idGen(e.id)
- e.entryExprKindCase.renumberIDs(idGen)
-}
-
-type baseMapEntry struct {
- key Expr
- value Expr
- isOptional bool
-}
-
-func (e *baseMapEntry) Kind() EntryExprKind {
- return MapEntryKind
-}
-
-func (e *baseMapEntry) Key() Expr {
- if e == nil {
- return nilExpr
+ navExprs := make([]NavigableExpr, argCount)
+ i := 0
+ if call.GetTarget() != nil {
+ navExprs[i] = nav.createChild(call.GetTarget())
+ i++
}
- return e.key
-}
-
-func (e *baseMapEntry) Value() Expr {
- if e == nil {
- return nilExpr
+ for _, arg := range call.GetArgs() {
+ navExprs[i] = nav.createChild(arg)
+ i++
}
- return e.value
+ return navExprs
}
-func (e *baseMapEntry) IsOptional() bool {
- if e == nil {
- return false
+func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
+ l := nav.ToExpr().GetListExpr()
+ navExprs := make([]NavigableExpr, len(l.GetElements()))
+ for i, e := range l.GetElements() {
+ navExprs[i] = nav.createChild(e)
}
- return e.isOptional
+ return navExprs
}
-func (e *baseMapEntry) renumberIDs(idGen IDGenerator) {
- e.Key().RenumberIDs(idGen)
- e.Value().RenumberIDs(idGen)
-}
-
-func (*baseMapEntry) isEntryExpr() {}
-
-type baseStructField struct {
- field string
- value Expr
- isOptional bool
-}
-
-func (f *baseStructField) Kind() EntryExprKind {
- return StructFieldKind
-}
+func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.ToExpr().GetStructExpr()
+ entries := make([]NavigableExpr, len(s.GetEntries()))
+ for i, e := range s.GetEntries() {
-func (f *baseStructField) Name() string {
- if f == nil {
- return ""
+ entries[i] = nav.createChild(e.GetValue())
}
- return f.field
+ return entries
}
-func (f *baseStructField) Value() Expr {
- if f == nil {
- return nilExpr
+func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.ToExpr().GetStructExpr()
+ entries := make([]NavigableExpr, len(s.GetEntries())*2)
+ j := 0
+ for _, e := range s.GetEntries() {
+ entries[j] = nav.createChild(e.GetMapKey())
+ entries[j+1] = nav.createChild(e.GetValue())
+ j += 2
}
- return f.value
+ return entries
}
-func (f *baseStructField) IsOptional() bool {
- if f == nil {
- return false
+func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
+ compre := nav.ToExpr().GetComprehensionExpr()
+ return []NavigableExpr{
+ nav.createChild(compre.GetIterRange()),
+ nav.createChild(compre.GetAccuInit()),
+ nav.createChild(compre.GetLoopCondition()),
+ nav.createChild(compre.GetLoopStep()),
+ nav.createChild(compre.GetResult()),
}
- return f.isOptional
}
-
-func (f *baseStructField) renumberIDs(idGen IDGenerator) {
- f.Value().RenumberIDs(idGen)
-}
-
-func (*baseStructField) isEntryExpr() {}
-
-var (
- nilExpr *expr = nil
- nilCall *baseCallExpr = nil
- nilCompre *baseComprehensionExpr = nil
- nilList *baseListExpr = nil
- nilMap *baseMapExpr = nil
- nilMapEntry *baseMapEntry = nil
- nilSel *baseSelectExpr = nil
- nilStruct *baseStructExpr = nil
- nilStructField *baseStructField = nil
-)
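For orientation, the `+` lines in the file above restore the v0.17.7 navigable AST surface (NavigableExpr, NavigableCallExpr, and the exported expression kinds). A minimal sketch of how that surface is typically walked, assuming a NavigableExpr has already been obtained from a checked AST and that the package path remains github.com/google/cel-go/common/ast:

package example

import celast "github.com/google/cel-go/common/ast"

// countCalls counts the call nodes whose function name equals fn by
// recursing through Children(); purely illustrative of the restored API.
func countCalls(e celast.NavigableExpr, fn string) int {
	count := 0
	if e.Kind() == celast.CallKind && e.AsCall().FunctionName() == fn {
		count++
	}
	for _, child := range e.Children() {
		count += countCalls(child, fn)
	}
	return count
}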
diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go
deleted file mode 100644
index b7f36e72a..000000000
--- a/vendor/github.com/google/cel-go/common/ast/factory.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ast
-
-import "github.com/google/cel-go/common/types/ref"
-
-// The ExprFactory interface defines a set of methods necessary for building native expression values.
-type ExprFactory interface {
- // CopyExpr creates a deep copy of the input Expr value.
- CopyExpr(Expr) Expr
-
- // CopyEntryExpr creates a deep copy of the input EntryExpr value.
- CopyEntryExpr(EntryExpr) EntryExpr
-
- // NewCall creates an Expr value representing a global function call.
- NewCall(id int64, function string, args ...Expr) Expr
-
- // NewComprehension creates an Expr value representing a comprehension over a value range.
- NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
-
- // NewMemberCall creates an Expr value representing a member function call.
- NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr
-
- // NewIdent creates an Expr value representing an identifier.
- NewIdent(id int64, name string) Expr
-
- // NewAccuIdent creates an Expr value representing an accumulator identifier within a
- // comprehension.
- NewAccuIdent(id int64) Expr
-
- // NewLiteral creates an Expr value representing a literal value, such as a string or integer.
- NewLiteral(id int64, value ref.Val) Expr
-
- // NewList creates an Expr value representing a list literal expression with optional indices.
- //
- // Optional indices will typically be empty unless the CEL optional types are enabled.
- NewList(id int64, elems []Expr, optIndices []int32) Expr
-
- // NewMap creates an Expr value representing a map literal expression
- NewMap(id int64, entries []EntryExpr) Expr
-
- // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether
- // the key is optionally set.
- NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr
-
- // NewPresenceTest creates an Expr representing a field presence test on an operand expression.
- NewPresenceTest(id int64, operand Expr, field string) Expr
-
- // NewSelect creates an Expr representing a field selection on an operand expression.
- NewSelect(id int64, operand Expr, field string) Expr
-
- // NewStruct creates an Expr value representing a struct literal with a given type name and a
- // set of field initializers.
- NewStruct(id int64, typeName string, fields []EntryExpr) Expr
-
- // NewStructField creates a StructField with a given field name, value, and a flag indicating
- // whether the field is optionally set.
- NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr
-
- // NewUnspecifiedExpr creates an empty expression node.
- NewUnspecifiedExpr(id int64) Expr
-
- isExprFactory()
-}
-
-type baseExprFactory struct{}
-
-// NewExprFactory creates an ExprFactory instance.
-func NewExprFactory() ExprFactory {
- return &baseExprFactory{}
-}
-
-func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr {
- if len(args) == 0 {
- args = []Expr{}
- }
- return fac.newExpr(
- id,
- &baseCallExpr{
- function: function,
- target: nilExpr,
- args: args,
- isMember: false,
- })
-}
-
-func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr {
- if len(args) == 0 {
- args = []Expr{}
- }
- return fac.newExpr(
- id,
- &baseCallExpr{
- function: function,
- target: target,
- args: args,
- isMember: true,
- })
-}
-
-func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
- return fac.newExpr(
- id,
- &baseComprehensionExpr{
- iterRange: iterRange,
- iterVar: iterVar,
- accuVar: accuVar,
- accuInit: accuInit,
- loopCond: loopCond,
- loopStep: loopStep,
- result: result,
- })
-}
-
-func (fac *baseExprFactory) NewIdent(id int64, name string) Expr {
- return fac.newExpr(id, baseIdentExpr(name))
-}
-
-func (fac *baseExprFactory) NewAccuIdent(id int64) Expr {
- return fac.NewIdent(id, "__result__")
-}
-
-func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr {
- return fac.newExpr(id, &baseLiteral{Val: value})
-}
-
-func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr {
- optIndexMap := make(map[int32]struct{}, len(optIndices))
- for _, idx := range optIndices {
- optIndexMap[idx] = struct{}{}
- }
- return fac.newExpr(id,
- &baseListExpr{
- elements: elems,
- optIndices: optIndices,
- optIndexMap: optIndexMap,
- })
-}
-
-func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr {
- return fac.newExpr(id, &baseMapExpr{entries: entries})
-}
-
-func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr {
- return fac.newEntryExpr(
- id,
- &baseMapEntry{
- key: key,
- value: value,
- isOptional: isOptional,
- })
-}
-
-func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr {
- return fac.newExpr(
- id,
- &baseSelectExpr{
- operand: operand,
- field: field,
- testOnly: true,
- })
-}
-
-func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr {
- return fac.newExpr(
- id,
- &baseSelectExpr{
- operand: operand,
- field: field,
- })
-}
-
-func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr {
- return fac.newExpr(
- id,
- &baseStructExpr{
- typeName: typeName,
- fields: fields,
- })
-}
-
-func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr {
- return fac.newEntryExpr(
- id,
- &baseStructField{
- field: field,
- value: value,
- isOptional: isOptional,
- })
-}
-
-func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr {
- return fac.newExpr(id, nil)
-}
-
-func (fac *baseExprFactory) CopyExpr(e Expr) Expr {
- // unwrap navigable expressions to avoid unnecessary allocations during copying.
- if nav, ok := e.(*navigableExprImpl); ok {
- e = nav.Expr
- }
- switch e.Kind() {
- case CallKind:
- c := e.AsCall()
- argsCopy := make([]Expr, len(c.Args()))
- for i, arg := range c.Args() {
- argsCopy[i] = fac.CopyExpr(arg)
- }
- if !c.IsMemberFunction() {
- return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...)
- }
- return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...)
- case ComprehensionKind:
- compre := e.AsComprehension()
- return fac.NewComprehension(e.ID(),
- fac.CopyExpr(compre.IterRange()),
- compre.IterVar(),
- compre.AccuVar(),
- fac.CopyExpr(compre.AccuInit()),
- fac.CopyExpr(compre.LoopCondition()),
- fac.CopyExpr(compre.LoopStep()),
- fac.CopyExpr(compre.Result()))
- case IdentKind:
- return fac.NewIdent(e.ID(), e.AsIdent())
- case ListKind:
- l := e.AsList()
- elemsCopy := make([]Expr, l.Size())
- for i, elem := range l.Elements() {
- elemsCopy[i] = fac.CopyExpr(elem)
- }
- return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices())
- case LiteralKind:
- return fac.NewLiteral(e.ID(), e.AsLiteral())
- case MapKind:
- m := e.AsMap()
- entriesCopy := make([]EntryExpr, m.Size())
- for i, entry := range m.Entries() {
- entriesCopy[i] = fac.CopyEntryExpr(entry)
- }
- return fac.NewMap(e.ID(), entriesCopy)
- case SelectKind:
- s := e.AsSelect()
- if s.IsTestOnly() {
- return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
- }
- return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
- case StructKind:
- s := e.AsStruct()
- fieldsCopy := make([]EntryExpr, len(s.Fields()))
- for i, field := range s.Fields() {
- fieldsCopy[i] = fac.CopyEntryExpr(field)
- }
- return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy)
- default:
- return fac.NewUnspecifiedExpr(e.ID())
- }
-}
-
-func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr {
- switch e.Kind() {
- case MapEntryKind:
- entry := e.AsMapEntry()
- return fac.NewMapEntry(e.ID(),
- fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional())
- case StructFieldKind:
- field := e.AsStructField()
- return fac.NewStructField(e.ID(),
- field.Name(), fac.CopyExpr(field.Value()), field.IsOptional())
- default:
- return fac.newEntryExpr(e.ID(), nil)
- }
-}
-
-func (*baseExprFactory) isExprFactory() {}
-
-func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr {
- return &expr{
- id: id,
- exprKindCase: e,
- }
-}
-
-func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr {
- return &entryExpr{
- id: id,
- entryExprKindCase: e,
- }
-}
-
-var (
- defaultFactory = &baseExprFactory{}
-)
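factory.go, deleted above, provided the newer builder API for native expressions. Purely for context on what is being dropped, a sketch of how the removed ExprFactory was used (the expression ids are arbitrary placeholders):

package example

import "github.com/google/cel-go/common/ast"

// buildEquals assembles the CEL expression `a == b` with the factory API
// that this patch deletes; `_==_` is the operator name used by the parser.
func buildEquals() ast.Expr {
	fac := ast.NewExprFactory()
	return fac.NewCall(1, "_==_",
		fac.NewIdent(2, "a"),
		fac.NewIdent(3, "b"))
}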
diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go
deleted file mode 100644
index f5ddf6aac..000000000
--- a/vendor/github.com/google/cel-go/common/ast/navigable.go
+++ /dev/null
@@ -1,652 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ast
-
-import (
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// NavigableExpr represents the base navigable expression value with methods to inspect the
-// parent and child expressions.
-type NavigableExpr interface {
- Expr
-
- // Type of the expression.
- //
- // If the expression is type-checked, the type check metadata is returned. If the expression
- // has not been type-checked, the types.DynType value is returned.
- Type() *types.Type
-
- // Parent returns the parent expression node, if one exists.
- Parent() (NavigableExpr, bool)
-
- // Children returns a list of child expression nodes.
- Children() []NavigableExpr
-
- // Depth indicates the depth in the expression tree.
- //
- // The root expression has depth 0.
- Depth() int
-}
-
-// NavigateAST converts an AST to a NavigableExpr
-func NavigateAST(ast *AST) NavigableExpr {
- return NavigateExpr(ast, ast.Expr())
-}
-
-// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST.
-//
-// If the expression is already a NavigableExpr, the parent and depth information will be
-// propagated on the new NavigableExpr value; otherwise, the expr value will be treated
-// as though it is the root of the expression graph with a depth of 0.
-func NavigateExpr(ast *AST, expr Expr) NavigableExpr {
- depth := 0
- var parent NavigableExpr = nil
- if nav, ok := expr.(NavigableExpr); ok {
- depth = nav.Depth()
- parent, _ = nav.Parent()
- }
- return newNavigableExpr(ast, parent, expr, depth)
-}
-
-// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
-//
-// This function type should be used with the `Match` and `MatchList` calls.
-type ExprMatcher func(NavigableExpr) bool
-
-// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
-// is comprised of all constant values, such as a simple literal or even list and map literal.
-func ConstantValueMatcher() ExprMatcher {
- return matchIsConstantValue
-}
-
-// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
-// the specified `kind`.
-func KindMatcher(kind ExprKind) ExprMatcher {
- return func(e NavigableExpr) bool {
- return e.Kind() == kind
- }
-}
-
-// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
-// function name is equal to `funcName`.
-func FunctionMatcher(funcName string) ExprMatcher {
- return func(e NavigableExpr) bool {
- if e.Kind() != CallKind {
- return false
- }
- return e.AsCall().FunctionName() == funcName
- }
-}
-
-// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
-//
-// Such a result would work well with subsequent MatchList calls.
-func AllMatcher() ExprMatcher {
- return func(NavigableExpr) bool {
- return true
- }
-}
-
-// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values
-// matching the input criteria in post-order (bottom up).
-func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
- matches := []NavigableExpr{}
- navVisitor := &baseVisitor{
- visitExpr: func(e Expr) {
- nav := e.(NavigableExpr)
- if matcher(nav) {
- matches = append(matches, nav)
- }
- },
- }
- visit(expr, navVisitor, postOrder, 0, 0)
- return matches
-}
-
-// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
-// subset of NavigableExpr values which match.
-func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
- matches := []NavigableExpr{}
- navVisitor := &baseVisitor{
- visitExpr: func(e Expr) {
- nav := e.(NavigableExpr)
- if matcher(nav) {
- matches = append(matches, nav)
- }
- },
- }
- for _, expr := range exprs {
- visit(expr, navVisitor, postOrder, 0, 1)
- }
- return matches
-}
-
-// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph.
-type Visitor interface {
- // VisitExpr visits the input expression.
- VisitExpr(Expr)
-
- // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry.
- VisitEntryExpr(EntryExpr)
-}
-
-type baseVisitor struct {
- visitExpr func(Expr)
- visitEntryExpr func(EntryExpr)
-}
-
-// VisitExpr visits the Expr if the internal expr visitor has been configured.
-func (v *baseVisitor) VisitExpr(e Expr) {
- if v.visitExpr != nil {
- v.visitExpr(e)
- }
-}
-
-// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured.
-func (v *baseVisitor) VisitEntryExpr(e EntryExpr) {
- if v.visitEntryExpr != nil {
- v.visitEntryExpr(e)
- }
-}
-
-// NewExprVisitor creates a visitor which only visits expression nodes.
-func NewExprVisitor(v func(Expr)) Visitor {
- return &baseVisitor{
- visitExpr: v,
- visitEntryExpr: nil,
- }
-}
-
-// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up).
-func PostOrderVisit(expr Expr, visitor Visitor) {
- visit(expr, visitor, postOrder, 0, 0)
-}
-
-// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down).
-func PreOrderVisit(expr Expr, visitor Visitor) {
- visit(expr, visitor, preOrder, 0, 0)
-}
-
-type visitOrder int
-
-const (
- preOrder = iota + 1
- postOrder
-)
-
-// TODO: consider exposing a way to configure a limit for the max visit depth.
-// It's possible that we could want to configure this on the NewExprVisitor()
-// and through MatchDescendants() / MaxID().
-func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) {
- if maxDepth > 0 && depth == maxDepth {
- return
- }
- if order == preOrder {
- visitor.VisitExpr(expr)
- }
- switch expr.Kind() {
- case CallKind:
- c := expr.AsCall()
- if c.IsMemberFunction() {
- visit(c.Target(), visitor, order, depth+1, maxDepth)
- }
- for _, arg := range c.Args() {
- visit(arg, visitor, order, depth+1, maxDepth)
- }
- case ComprehensionKind:
- c := expr.AsComprehension()
- visit(c.IterRange(), visitor, order, depth+1, maxDepth)
- visit(c.AccuInit(), visitor, order, depth+1, maxDepth)
- visit(c.LoopCondition(), visitor, order, depth+1, maxDepth)
- visit(c.LoopStep(), visitor, order, depth+1, maxDepth)
- visit(c.Result(), visitor, order, depth+1, maxDepth)
- case ListKind:
- l := expr.AsList()
- for _, elem := range l.Elements() {
- visit(elem, visitor, order, depth+1, maxDepth)
- }
- case MapKind:
- m := expr.AsMap()
- for _, e := range m.Entries() {
- if order == preOrder {
- visitor.VisitEntryExpr(e)
- }
- entry := e.AsMapEntry()
- visit(entry.Key(), visitor, order, depth+1, maxDepth)
- visit(entry.Value(), visitor, order, depth+1, maxDepth)
- if order == postOrder {
- visitor.VisitEntryExpr(e)
- }
- }
- case SelectKind:
- visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth)
- case StructKind:
- s := expr.AsStruct()
- for _, f := range s.Fields() {
- visitor.VisitEntryExpr(f)
- visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth)
- }
- }
- if order == postOrder {
- visitor.VisitExpr(expr)
- }
-}
-
-func matchIsConstantValue(e NavigableExpr) bool {
- if e.Kind() == LiteralKind {
- return true
- }
- if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
- for _, child := range e.Children() {
- if !matchIsConstantValue(child) {
- return false
- }
- }
- return true
- }
- return false
-}
-
-func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr {
- // Reduce navigable expression nesting by unwrapping the embedded Expr value.
- if nav, ok := expr.(*navigableExprImpl); ok {
- expr = nav.Expr
- }
- nav := &navigableExprImpl{
- Expr: expr,
- depth: depth,
- ast: ast,
- parent: parent,
- createChildren: getChildFactory(expr),
- }
- return nav
-}
-
-type navigableExprImpl struct {
- Expr
- depth int
- ast *AST
- parent NavigableExpr
- createChildren childFactory
-}
-
-func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
- if nav.parent != nil {
- return nav.parent, true
- }
- return nil, false
-}
-
-func (nav *navigableExprImpl) ID() int64 {
- return nav.Expr.ID()
-}
-
-func (nav *navigableExprImpl) Kind() ExprKind {
- return nav.Expr.Kind()
-}
-
-func (nav *navigableExprImpl) Type() *types.Type {
- return nav.ast.GetType(nav.ID())
-}
-
-func (nav *navigableExprImpl) Children() []NavigableExpr {
- return nav.createChildren(nav)
-}
-
-func (nav *navigableExprImpl) Depth() int {
- return nav.depth
-}
-
-func (nav *navigableExprImpl) AsCall() CallExpr {
- return navigableCallImpl{navigableExprImpl: nav}
-}
-
-func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr {
- return navigableComprehensionImpl{navigableExprImpl: nav}
-}
-
-func (nav *navigableExprImpl) AsIdent() string {
- return nav.Expr.AsIdent()
-}
-
-func (nav *navigableExprImpl) AsList() ListExpr {
- return navigableListImpl{navigableExprImpl: nav}
-}
-
-func (nav *navigableExprImpl) AsLiteral() ref.Val {
- return nav.Expr.AsLiteral()
-}
-
-func (nav *navigableExprImpl) AsMap() MapExpr {
- return navigableMapImpl{navigableExprImpl: nav}
-}
-
-func (nav *navigableExprImpl) AsSelect() SelectExpr {
- return navigableSelectImpl{navigableExprImpl: nav}
-}
-
-func (nav *navigableExprImpl) AsStruct() StructExpr {
- return navigableStructImpl{navigableExprImpl: nav}
-}
-
-func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr {
- return newNavigableExpr(nav.ast, nav, e, nav.depth+1)
-}
-
-func (nav *navigableExprImpl) isExpr() {}
-
-type navigableCallImpl struct {
- *navigableExprImpl
-}
-
-func (call navigableCallImpl) FunctionName() string {
- return call.Expr.AsCall().FunctionName()
-}
-
-func (call navigableCallImpl) IsMemberFunction() bool {
- return call.Expr.AsCall().IsMemberFunction()
-}
-
-func (call navigableCallImpl) Target() Expr {
- t := call.Expr.AsCall().Target()
- if t != nil {
- return call.createChild(t)
- }
- return nil
-}
-
-func (call navigableCallImpl) Args() []Expr {
- args := call.Expr.AsCall().Args()
- navArgs := make([]Expr, len(args))
- for i, a := range args {
- navArgs[i] = call.createChild(a)
- }
- return navArgs
-}
-
-type navigableComprehensionImpl struct {
- *navigableExprImpl
-}
-
-func (comp navigableComprehensionImpl) IterRange() Expr {
- return comp.createChild(comp.Expr.AsComprehension().IterRange())
-}
-
-func (comp navigableComprehensionImpl) IterVar() string {
- return comp.Expr.AsComprehension().IterVar()
-}
-
-func (comp navigableComprehensionImpl) AccuVar() string {
- return comp.Expr.AsComprehension().AccuVar()
-}
-
-func (comp navigableComprehensionImpl) AccuInit() Expr {
- return comp.createChild(comp.Expr.AsComprehension().AccuInit())
-}
-
-func (comp navigableComprehensionImpl) LoopCondition() Expr {
- return comp.createChild(comp.Expr.AsComprehension().LoopCondition())
-}
-
-func (comp navigableComprehensionImpl) LoopStep() Expr {
- return comp.createChild(comp.Expr.AsComprehension().LoopStep())
-}
-
-func (comp navigableComprehensionImpl) Result() Expr {
- return comp.createChild(comp.Expr.AsComprehension().Result())
-}
-
-type navigableListImpl struct {
- *navigableExprImpl
-}
-
-func (l navigableListImpl) Elements() []Expr {
- pbElems := l.Expr.AsList().Elements()
- elems := make([]Expr, len(pbElems))
- for i := 0; i < len(pbElems); i++ {
- elems[i] = l.createChild(pbElems[i])
- }
- return elems
-}
-
-func (l navigableListImpl) IsOptional(index int32) bool {
- return l.Expr.AsList().IsOptional(index)
-}
-
-func (l navigableListImpl) OptionalIndices() []int32 {
- return l.Expr.AsList().OptionalIndices()
-}
-
-func (l navigableListImpl) Size() int {
- return l.Expr.AsList().Size()
-}
-
-type navigableMapImpl struct {
- *navigableExprImpl
-}
-
-func (m navigableMapImpl) Entries() []EntryExpr {
- mapExpr := m.Expr.AsMap()
- entries := make([]EntryExpr, len(mapExpr.Entries()))
- for i, e := range mapExpr.Entries() {
- entry := e.AsMapEntry()
- entries[i] = &entryExpr{
- id: e.ID(),
- entryExprKindCase: navigableEntryImpl{
- key: m.createChild(entry.Key()),
- val: m.createChild(entry.Value()),
- isOpt: entry.IsOptional(),
- },
- }
- }
- return entries
-}
-
-func (m navigableMapImpl) Size() int {
- return m.Expr.AsMap().Size()
-}
-
-type navigableEntryImpl struct {
- key NavigableExpr
- val NavigableExpr
- isOpt bool
-}
-
-func (e navigableEntryImpl) Kind() EntryExprKind {
- return MapEntryKind
-}
-
-func (e navigableEntryImpl) Key() Expr {
- return e.key
-}
-
-func (e navigableEntryImpl) Value() Expr {
- return e.val
-}
-
-func (e navigableEntryImpl) IsOptional() bool {
- return e.isOpt
-}
-
-func (e navigableEntryImpl) renumberIDs(IDGenerator) {}
-
-func (e navigableEntryImpl) isEntryExpr() {}
-
-type navigableSelectImpl struct {
- *navigableExprImpl
-}
-
-func (sel navigableSelectImpl) FieldName() string {
- return sel.Expr.AsSelect().FieldName()
-}
-
-func (sel navigableSelectImpl) IsTestOnly() bool {
- return sel.Expr.AsSelect().IsTestOnly()
-}
-
-func (sel navigableSelectImpl) Operand() Expr {
- return sel.createChild(sel.Expr.AsSelect().Operand())
-}
-
-type navigableStructImpl struct {
- *navigableExprImpl
-}
-
-func (s navigableStructImpl) TypeName() string {
- return s.Expr.AsStruct().TypeName()
-}
-
-func (s navigableStructImpl) Fields() []EntryExpr {
- fieldInits := s.Expr.AsStruct().Fields()
- fields := make([]EntryExpr, len(fieldInits))
- for i, f := range fieldInits {
- field := f.AsStructField()
- fields[i] = &entryExpr{
- id: f.ID(),
- entryExprKindCase: navigableFieldImpl{
- name: field.Name(),
- val: s.createChild(field.Value()),
- isOpt: field.IsOptional(),
- },
- }
- }
- return fields
-}
-
-type navigableFieldImpl struct {
- name string
- val NavigableExpr
- isOpt bool
-}
-
-func (f navigableFieldImpl) Kind() EntryExprKind {
- return StructFieldKind
-}
-
-func (f navigableFieldImpl) Name() string {
- return f.name
-}
-
-func (f navigableFieldImpl) Value() Expr {
- return f.val
-}
-
-func (f navigableFieldImpl) IsOptional() bool {
- return f.isOpt
-}
-
-func (f navigableFieldImpl) renumberIDs(IDGenerator) {}
-
-func (f navigableFieldImpl) isEntryExpr() {}
-
-func getChildFactory(expr Expr) childFactory {
- if expr == nil {
- return noopFactory
- }
- switch expr.Kind() {
- case LiteralKind:
- return noopFactory
- case IdentKind:
- return noopFactory
- case SelectKind:
- return selectFactory
- case CallKind:
- return callArgFactory
- case ListKind:
- return listElemFactory
- case MapKind:
- return mapEntryFactory
- case StructKind:
- return structEntryFactory
- case ComprehensionKind:
- return comprehensionFactory
- default:
- return noopFactory
- }
-}
-
-type childFactory func(*navigableExprImpl) []NavigableExpr
-
-func noopFactory(*navigableExprImpl) []NavigableExpr {
- return nil
-}
-
-func selectFactory(nav *navigableExprImpl) []NavigableExpr {
- return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())}
-}
-
-func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
- call := nav.Expr.AsCall()
- argCount := len(call.Args())
- if call.IsMemberFunction() {
- argCount++
- }
- navExprs := make([]NavigableExpr, argCount)
- i := 0
- if call.IsMemberFunction() {
- navExprs[i] = nav.createChild(call.Target())
- i++
- }
- for _, arg := range call.Args() {
- navExprs[i] = nav.createChild(arg)
- i++
- }
- return navExprs
-}
-
-func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
- l := nav.Expr.AsList()
- navExprs := make([]NavigableExpr, len(l.Elements()))
- for i, e := range l.Elements() {
- navExprs[i] = nav.createChild(e)
- }
- return navExprs
-}
-
-func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
- s := nav.Expr.AsStruct()
- entries := make([]NavigableExpr, len(s.Fields()))
- for i, e := range s.Fields() {
- f := e.AsStructField()
- entries[i] = nav.createChild(f.Value())
- }
- return entries
-}
-
-func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
- m := nav.Expr.AsMap()
- entries := make([]NavigableExpr, len(m.Entries())*2)
- j := 0
- for _, e := range m.Entries() {
- mapEntry := e.AsMapEntry()
- entries[j] = nav.createChild(mapEntry.Key())
- entries[j+1] = nav.createChild(mapEntry.Value())
- j += 2
- }
- return entries
-}
-
-func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
- compre := nav.Expr.AsComprehension()
- return []NavigableExpr{
- nav.createChild(compre.IterRange()),
- nav.createChild(compre.AccuInit()),
- nav.createChild(compre.LoopCondition()),
- nav.createChild(compre.LoopStep()),
- nav.createChild(compre.Result()),
- }
-}
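navigable.go, deleted above, also carried the matcher helpers (NavigateAST, MatchDescendants, FunctionMatcher, and so on). A sketch of the call pattern that file supported, assuming `checked` is an *ast.AST produced by the type-checker:

package example

import "github.com/google/cel-go/common/ast"

// findEqualityCalls gathers every `_==_` call in post-order using the
// matcher helpers defined in the deleted file.
func findEqualityCalls(checked *ast.AST) []ast.NavigableExpr {
	root := ast.NavigateAST(checked)
	return ast.MatchDescendants(root, ast.FunctionMatcher("_==_"))
}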
diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
index 81197f064..3f3f07887 100644
--- a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
@@ -12,7 +12,7 @@ go_library(
],
importpath = "github.com/google/cel-go/common/containers",
deps = [
- "//common/ast:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
@@ -26,6 +26,6 @@ go_test(
":go_default_library",
],
deps = [
- "//common/ast:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go
index 52153d4cd..d46698d3c 100644
--- a/vendor/github.com/google/cel-go/common/containers/container.go
+++ b/vendor/github.com/google/cel-go/common/containers/container.go
@@ -20,7 +20,7 @@ import (
"fmt"
"strings"
- "github.com/google/cel-go/common/ast"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
var (
@@ -297,19 +297,19 @@ func Name(name string) ContainerOption {
// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
// 'found' value that indicates if the conversion is successful.
-func ToQualifiedName(e ast.Expr) (string, bool) {
- switch e.Kind() {
- case ast.IdentKind:
- id := e.AsIdent()
- return id, true
- case ast.SelectKind:
- sel := e.AsSelect()
+func ToQualifiedName(e *exprpb.Expr) (string, bool) {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ id := e.GetIdentExpr()
+ return id.GetName(), true
+ case *exprpb.Expr_SelectExpr:
+ sel := e.GetSelectExpr()
// Test only expressions are not valid as qualified names.
- if sel.IsTestOnly() {
+ if sel.GetTestOnly() {
return "", false
}
- if qual, found := ToQualifiedName(sel.Operand()); found {
- return qual + "." + sel.FieldName(), true
+ if qual, found := ToQualifiedName(sel.GetOperand()); found {
+ return qual + "." + sel.GetField(), true
}
}
return "", false
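The restored ToQualifiedName works on the v1alpha1 protos directly, flattening an ident/select chain into a dotted name and reporting false for anything else, including test-only selects. A sketch with the proto literal spelled out by hand, assuming the usual generated types:

package example

import (
	"github.com/google/cel-go/common/containers"
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// qualifiedName resolves the proto form of `a.b` to the string "a.b".
func qualifiedName() (string, bool) {
	e := &exprpb.Expr{ExprKind: &exprpb.Expr_SelectExpr{SelectExpr: &exprpb.Expr_Select{
		Operand: &exprpb.Expr{ExprKind: &exprpb.Expr_IdentExpr{
			IdentExpr: &exprpb.Expr_Ident{Name: "a"},
		}},
		Field: "b",
	}}}
	return containers.ToQualifiedName(e) // "a.b", true
}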
diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
index 724ed3404..1f029839c 100644
--- a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -13,8 +13,6 @@ go_library(
importpath = "github.com/google/cel-go/common/debug",
deps = [
"//common:go_default_library",
- "//common/ast:go_default_library",
- "//common/types:go_default_library",
- "//common/types/ref:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go
index e4c01ac6e..5dab156ef 100644
--- a/vendor/github.com/google/cel-go/common/debug/debug.go
+++ b/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -22,9 +22,7 @@ import (
"strconv"
"strings"
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Adorner returns debug metadata that will be tacked on to the string
@@ -40,7 +38,7 @@ type Writer interface {
// Buffer pushes an expression into an internal queue of expressions to
// write to a string.
- Buffer(e ast.Expr)
+ Buffer(e *exprpb.Expr)
}
type emptyDebugAdorner struct {
@@ -53,12 +51,12 @@ func (a *emptyDebugAdorner) GetMetadata(e any) string {
}
// ToDebugString gives the unadorned string representation of the Expr.
-func ToDebugString(e ast.Expr) string {
+func ToDebugString(e *exprpb.Expr) string {
return ToAdornedDebugString(e, emptyAdorner)
}
// ToAdornedDebugString gives the adorned string representation of the Expr.
-func ToAdornedDebugString(e ast.Expr, adorner Adorner) string {
+func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string {
w := newDebugWriter(adorner)
w.Buffer(e)
return w.String()
@@ -80,51 +78,49 @@ func newDebugWriter(a Adorner) *debugWriter {
}
}
-func (w *debugWriter) Buffer(e ast.Expr) {
+func (w *debugWriter) Buffer(e *exprpb.Expr) {
if e == nil {
return
}
- switch e.Kind() {
- case ast.LiteralKind:
- w.append(formatLiteral(e.AsLiteral()))
- case ast.IdentKind:
- w.append(e.AsIdent())
- case ast.SelectKind:
- w.appendSelect(e.AsSelect())
- case ast.CallKind:
- w.appendCall(e.AsCall())
- case ast.ListKind:
- w.appendList(e.AsList())
- case ast.MapKind:
- w.appendMap(e.AsMap())
- case ast.StructKind:
- w.appendStruct(e.AsStruct())
- case ast.ComprehensionKind:
- w.appendComprehension(e.AsComprehension())
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_ConstExpr:
+ w.append(formatLiteral(e.GetConstExpr()))
+ case *exprpb.Expr_IdentExpr:
+ w.append(e.GetIdentExpr().Name)
+ case *exprpb.Expr_SelectExpr:
+ w.appendSelect(e.GetSelectExpr())
+ case *exprpb.Expr_CallExpr:
+ w.appendCall(e.GetCallExpr())
+ case *exprpb.Expr_ListExpr:
+ w.appendList(e.GetListExpr())
+ case *exprpb.Expr_StructExpr:
+ w.appendStruct(e.GetStructExpr())
+ case *exprpb.Expr_ComprehensionExpr:
+ w.appendComprehension(e.GetComprehensionExpr())
}
w.adorn(e)
}
-func (w *debugWriter) appendSelect(sel ast.SelectExpr) {
- w.Buffer(sel.Operand())
+func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) {
+ w.Buffer(sel.GetOperand())
w.append(".")
- w.append(sel.FieldName())
- if sel.IsTestOnly() {
+ w.append(sel.GetField())
+ if sel.TestOnly {
w.append("~test-only~")
}
}
-func (w *debugWriter) appendCall(call ast.CallExpr) {
- if call.IsMemberFunction() {
- w.Buffer(call.Target())
+func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
+ if call.Target != nil {
+ w.Buffer(call.GetTarget())
w.append(".")
}
- w.append(call.FunctionName())
+ w.append(call.GetFunction())
w.append("(")
- if len(call.Args()) > 0 {
+ if len(call.GetArgs()) > 0 {
w.addIndent()
w.appendLine()
- for i, arg := range call.Args() {
+ for i, arg := range call.GetArgs() {
if i > 0 {
w.append(",")
w.appendLine()
@@ -137,12 +133,12 @@ func (w *debugWriter) appendCall(call ast.CallExpr) {
w.append(")")
}
-func (w *debugWriter) appendList(list ast.ListExpr) {
+func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
w.append("[")
- if len(list.Elements()) > 0 {
+ if len(list.GetElements()) > 0 {
w.appendLine()
w.addIndent()
- for i, elem := range list.Elements() {
+ for i, elem := range list.GetElements() {
if i > 0 {
w.append(",")
w.appendLine()
@@ -155,25 +151,32 @@ func (w *debugWriter) appendList(list ast.ListExpr) {
w.append("]")
}
-func (w *debugWriter) appendStruct(obj ast.StructExpr) {
- w.append(obj.TypeName())
+func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) {
+ if obj.MessageName != "" {
+ w.appendObject(obj)
+ } else {
+ w.appendMap(obj)
+ }
+}
+
+func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
+ w.append(obj.GetMessageName())
w.append("{")
- if len(obj.Fields()) > 0 {
+ if len(obj.GetEntries()) > 0 {
w.appendLine()
w.addIndent()
- for i, f := range obj.Fields() {
- field := f.AsStructField()
+ for i, entry := range obj.GetEntries() {
if i > 0 {
w.append(",")
w.appendLine()
}
- if field.IsOptional() {
+ if entry.GetOptionalEntry() {
w.append("?")
}
- w.append(field.Name())
+ w.append(entry.GetFieldKey())
w.append(":")
- w.Buffer(field.Value())
- w.adorn(f)
+ w.Buffer(entry.GetValue())
+ w.adorn(entry)
}
w.removeIndent()
w.appendLine()
@@ -181,24 +184,23 @@ func (w *debugWriter) appendStruct(obj ast.StructExpr) {
w.append("}")
}
-func (w *debugWriter) appendMap(m ast.MapExpr) {
+func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
w.append("{")
- if m.Size() > 0 {
+ if len(obj.GetEntries()) > 0 {
w.appendLine()
w.addIndent()
- for i, e := range m.Entries() {
- entry := e.AsMapEntry()
+ for i, entry := range obj.GetEntries() {
if i > 0 {
w.append(",")
w.appendLine()
}
- if entry.IsOptional() {
+ if entry.GetOptionalEntry() {
w.append("?")
}
- w.Buffer(entry.Key())
+ w.Buffer(entry.GetMapKey())
w.append(":")
- w.Buffer(entry.Value())
- w.adorn(e)
+ w.Buffer(entry.GetValue())
+ w.adorn(entry)
}
w.removeIndent()
w.appendLine()
@@ -206,62 +208,62 @@ func (w *debugWriter) appendMap(m ast.MapExpr) {
w.append("}")
}
-func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) {
+func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) {
w.append("__comprehension__(")
w.addIndent()
w.appendLine()
w.append("// Variable")
w.appendLine()
- w.append(comprehension.IterVar())
+ w.append(comprehension.GetIterVar())
w.append(",")
w.appendLine()
w.append("// Target")
w.appendLine()
- w.Buffer(comprehension.IterRange())
+ w.Buffer(comprehension.GetIterRange())
w.append(",")
w.appendLine()
w.append("// Accumulator")
w.appendLine()
- w.append(comprehension.AccuVar())
+ w.append(comprehension.GetAccuVar())
w.append(",")
w.appendLine()
w.append("// Init")
w.appendLine()
- w.Buffer(comprehension.AccuInit())
+ w.Buffer(comprehension.GetAccuInit())
w.append(",")
w.appendLine()
w.append("// LoopCondition")
w.appendLine()
- w.Buffer(comprehension.LoopCondition())
+ w.Buffer(comprehension.GetLoopCondition())
w.append(",")
w.appendLine()
w.append("// LoopStep")
w.appendLine()
- w.Buffer(comprehension.LoopStep())
+ w.Buffer(comprehension.GetLoopStep())
w.append(",")
w.appendLine()
w.append("// Result")
w.appendLine()
- w.Buffer(comprehension.Result())
+ w.Buffer(comprehension.GetResult())
w.append(")")
w.removeIndent()
}
-func formatLiteral(c ref.Val) string {
- switch v := c.(type) {
- case types.Bool:
- return fmt.Sprintf("%t", v)
- case types.Bytes:
- return fmt.Sprintf("b\"%s\"", string(v))
- case types.Double:
- return fmt.Sprintf("%v", float64(v))
- case types.Int:
- return fmt.Sprintf("%d", int64(v))
- case types.String:
- return strconv.Quote(string(v))
- case types.Uint:
- return fmt.Sprintf("%du", uint64(v))
- case types.Null:
+func formatLiteral(c *exprpb.Constant) string {
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ return fmt.Sprintf("%t", c.GetBoolValue())
+ case *exprpb.Constant_BytesValue:
+ return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue()))
+ case *exprpb.Constant_DoubleValue:
+ return fmt.Sprintf("%v", c.GetDoubleValue())
+ case *exprpb.Constant_Int64Value:
+ return fmt.Sprintf("%d", c.GetInt64Value())
+ case *exprpb.Constant_StringValue:
+ return strconv.Quote(c.GetStringValue())
+ case *exprpb.Constant_Uint64Value:
+ return fmt.Sprintf("%du", c.GetUint64Value())
+ case *exprpb.Constant_NullValue:
return "null"
default:
panic("Unknown constant type")
diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go
index 25adc73d8..63919714e 100644
--- a/vendor/github.com/google/cel-go/common/errors.go
+++ b/vendor/github.com/google/cel-go/common/errors.go
@@ -64,7 +64,7 @@ func (e *Errors) GetErrors() []*Error {
// Append creates a new Errors object with the current and input errors.
func (e *Errors) Append(errs []*Error) *Errors {
return &Errors{
- errors: append(e.errors[:], errs...),
+ errors: append(e.errors, errs...),
source: e.source,
numErrors: e.numErrors + len(errs),
maxErrorsToReport: e.maxErrorsToReport,
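
The only change to Errors.Append is dropping a redundant full-slice expression:
e.errors[:] is the same slice header as e.errors (same pointer, length, and capacity),
so neither spelling copies and behaviour is unchanged. A small illustration of that
aliasing, not part of the patch:

    package main

    import "fmt"

    func main() {
        base := make([]int, 2, 4) // len 2 with spare capacity
        base[0], base[1] = 1, 2

        // base[:] re-slices the whole slice; it does not copy.
        a := append(base[:], 3)
        b := append(base, 4)

        // Both appends wrote into the shared backing array, so the second
        // overwrote the element appended by the first.
        fmt.Println(a, b) // [1 2 4] [1 2 4]
    }
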
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
index d301aa38a..e80b4622e 100644
--- a/vendor/github.com/google/cel-go/common/types/provider.go
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -54,10 +54,6 @@ type Provider interface {
// Returns false if not found.
FindStructType(structType string) (*Type, bool)
- // FindStructFieldNames returns thet field names associated with the type, if the type
- // is found.
- FindStructFieldNames(structType string) ([]string, bool)
-
// FieldStructFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
FindStructFieldType(structType, fieldName string) (*FieldType, bool)
@@ -158,7 +154,7 @@ func (p *Registry) EnumValue(enumName string) ref.Val {
return Int(enumVal.Value())
}
-// FindFieldType returns the field type for a checked type value. Returns false if
+// FieldFieldType returns the field type for a checked type value. Returns false if
// the field could not be found.
//
// Deprecated: use FindStructFieldType
@@ -177,24 +173,7 @@ func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType,
GetFrom: field.GetFrom}, true
}
-// FindStructFieldNames returns the set of field names for the given struct type,
-// if the type exists in the registry.
-func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) {
- msgType, found := p.pbdb.DescribeType(structType)
- if !found {
- return []string{}, false
- }
- fieldMap := msgType.FieldMap()
- fields := make([]string, len(fieldMap))
- idx := 0
- for f := range fieldMap {
- fields[idx] = f
- idx++
- }
- return fields, true
-}
-
-// FindStructFieldType returns the field type for a checked type value. Returns
+// FieldStructFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
msgType, found := p.pbdb.DescribeType(structType)
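
With FindStructFieldNames removed from the v0.17 Provider interface and Registry,
per-field lookups still go through FindStructFieldType. A usage sketch that registers
the message explicitly; the Duration type is only an illustrative choice, not something
this patch prescribes:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Register the message so the registry can describe its fields.
        reg, err := types.NewRegistry(&durationpb.Duration{})
        if err != nil {
            panic(err)
        }
        // Field-by-field resolution remains available in v0.17.7.
        ft, found := reg.FindStructFieldType("google.protobuf.Duration", "seconds")
        fmt.Println(found, ft != nil) // true true
    }
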
diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel
index 570130229..6fdcc60c6 100644
--- a/vendor/github.com/google/cel-go/ext/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -7,9 +7,7 @@ package(
go_library(
name = "go_default_library",
srcs = [
- "bindings.go",
"encoders.go",
- "formatting.go",
"guards.go",
"lists.go",
"math.go",
@@ -23,13 +21,14 @@ go_library(
deps = [
"//cel:go_default_library",
"//checker:go_default_library",
- "//common/ast:go_default_library",
+ "//checker/decls:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"//interpreter:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
"@org_golang_google_protobuf//types/known/structpb",
@@ -62,6 +61,7 @@ go_test(
"//test:go_default_library",
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md
index 2fac0cb22..6f621ac4a 100644
--- a/vendor/github.com/google/cel-go/ext/README.md
+++ b/vendor/github.com/google/cel-go/ext/README.md
@@ -414,17 +414,3 @@ Examples:
'TacoCat'.upperAscii() // returns 'TACOCAT'
'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
-
-### Reverse
-
-Returns a new string whose characters are the same as the target string, only formatted in
-reverse order.
-This function relies on converting strings to rune arrays in order to reverse.
-It can be located in Version 3 of strings.
-
- <string>.reverse() -> <string>
-
-Examples:
-
- 'gums'.reverse() // returns 'smug'
- 'John Smith'.reverse() // returns 'htimS nhoJ'
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go
index 2c6cc627f..4ac9a7f07 100644
--- a/vendor/github.com/google/cel-go/ext/bindings.go
+++ b/vendor/github.com/google/cel-go/ext/bindings.go
@@ -16,8 +16,8 @@ package ext
import (
"github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Bindings returns a cel.EnvOption to configure support for local variable
@@ -61,7 +61,7 @@ func (celBindings) CompileOptions() []cel.EnvOption {
return []cel.EnvOption{
cel.Macros(
// cel.bind(var, <init>, <expr>)
- cel.ReceiverMacro(bindMacro, 3, celBind),
+ cel.NewReceiverMacro(bindMacro, 3, celBind),
),
}
}
@@ -70,27 +70,27 @@ func (celBindings) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
-func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(celNamespace, target) {
return nil, nil
}
varIdent := args[0]
varName := ""
- switch varIdent.Kind() {
- case ast.IdentKind:
- varName = varIdent.AsIdent()
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
default:
- return nil, mef.NewError(varIdent.ID(), "cel.bind() variable names must be simple identifiers")
+ return nil, meh.NewError(varIdent.GetId(), "cel.bind() variable names must be simple identifiers")
}
varInit := args[1]
resultExpr := args[2]
- return mef.NewComprehension(
- mef.NewList(),
+ return meh.Fold(
unusedIterVar,
+ meh.NewList(),
varName,
varInit,
- mef.NewLiteral(types.False),
- mef.NewIdent(varName),
+ meh.LiteralBool(false),
+ meh.Ident(varName),
resultExpr,
), nil
}
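
The cel.bind() macro behaves the same whether it expands through the MacroExprHelper.Fold
call restored here or the newer expression-factory API; only the parse-time plumbing
differs. A minimal usage sketch against a plain environment with the Bindings extension
(assumed setup, not part of the patch):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Bindings())
        if err != nil {
            panic(err)
        }
        // Bind x once, then reuse it in the result expression.
        ast, iss := env.Compile(`cel.bind(x, 2 + 3, x * x)`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // 25
    }
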
diff --git a/vendor/github.com/google/cel-go/ext/formatting.go b/vendor/github.com/google/cel-go/ext/formatting.go
deleted file mode 100644
index 2f35b996c..000000000
--- a/vendor/github.com/google/cel-go/ext/formatting.go
+++ /dev/null
@@ -1,904 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "errors"
- "fmt"
- "math"
- "sort"
- "strconv"
- "strings"
- "unicode"
-
- "golang.org/x/text/language"
- "golang.org/x/text/message"
-
- "github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-type clauseImpl func(ref.Val, string) (string, error)
-
-func clauseForType(argType ref.Type) (clauseImpl, error) {
- switch argType {
- case types.IntType, types.UintType:
- return formatDecimal, nil
- case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType:
- return FormatString, nil
- case types.TimestampType, types.DurationType:
- // special case to ensure timestamps/durations get printed as CEL literals
- return func(arg ref.Val, locale string) (string, error) {
- argStrVal := arg.ConvertToType(types.StringType)
- argStr := argStrVal.Value().(string)
- if arg.Type() == types.TimestampType {
- return fmt.Sprintf("timestamp(%q)", argStr), nil
- }
- if arg.Type() == types.DurationType {
- return fmt.Sprintf("duration(%q)", argStr), nil
- }
- return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName())
- }, nil
- case types.ListType:
- return formatList, nil
- case types.MapType:
- return formatMap, nil
- case types.DoubleType:
- // avoid formatFixed so we can output a period as the decimal separator in order
- // to always be a valid CEL literal
- return func(arg ref.Val, locale string) (string, error) {
- argDouble, ok := arg.Value().(float64)
- if !ok {
- return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName())
- }
- fmtStr := fmt.Sprintf("%%.%df", defaultPrecision)
- return fmt.Sprintf(fmtStr, argDouble), nil
- }, nil
- case types.TypeType:
- return func(arg ref.Val, locale string) (string, error) {
- return fmt.Sprintf("type(%s)", arg.Value().(string)), nil
- }, nil
- default:
- return nil, fmt.Errorf("no formatting function for %s", argType.TypeName())
- }
-}
-
-func formatList(arg ref.Val, locale string) (string, error) {
- argList := arg.(traits.Lister)
- argIterator := argList.Iterator()
- var listStrBuilder strings.Builder
- _, err := listStrBuilder.WriteRune('[')
- if err != nil {
- return "", fmt.Errorf("error writing to list string: %w", err)
- }
- for argIterator.HasNext() == types.True {
- member := argIterator.Next()
- memberFormat, err := clauseForType(member.Type())
- if err != nil {
- return "", err
- }
- unquotedStr, err := memberFormat(member, locale)
- if err != nil {
- return "", err
- }
- str := quoteForCEL(member, unquotedStr)
- _, err = listStrBuilder.WriteString(str)
- if err != nil {
- return "", fmt.Errorf("error writing to list string: %w", err)
- }
- if argIterator.HasNext() == types.True {
- _, err = listStrBuilder.WriteString(", ")
- if err != nil {
- return "", fmt.Errorf("error writing to list string: %w", err)
- }
- }
- }
- _, err = listStrBuilder.WriteRune(']')
- if err != nil {
- return "", fmt.Errorf("error writing to list string: %w", err)
- }
- return listStrBuilder.String(), nil
-}
-
-func formatMap(arg ref.Val, locale string) (string, error) {
- argMap := arg.(traits.Mapper)
- argIterator := argMap.Iterator()
- type mapPair struct {
- key string
- value string
- }
- argPairs := make([]mapPair, argMap.Size().Value().(int64))
- i := 0
- for argIterator.HasNext() == types.True {
- key := argIterator.Next()
- var keyFormat clauseImpl
- switch key.Type() {
- case types.StringType, types.BoolType:
- keyFormat = FormatString
- case types.IntType, types.UintType:
- keyFormat = formatDecimal
- default:
- return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName())
- }
- unquotedKeyStr, err := keyFormat(key, locale)
- if err != nil {
- return "", err
- }
- keyStr := quoteForCEL(key, unquotedKeyStr)
- value, found := argMap.Find(key)
- if !found {
- return "", fmt.Errorf("could not find key: %q", key)
- }
- valueFormat, err := clauseForType(value.Type())
- if err != nil {
- return "", err
- }
- unquotedValueStr, err := valueFormat(value, locale)
- if err != nil {
- return "", err
- }
- valueStr := quoteForCEL(value, unquotedValueStr)
- argPairs[i] = mapPair{keyStr, valueStr}
- i++
- }
- sort.SliceStable(argPairs, func(x, y int) bool {
- return argPairs[x].key < argPairs[y].key
- })
- var mapStrBuilder strings.Builder
- _, err := mapStrBuilder.WriteRune('{')
- if err != nil {
- return "", fmt.Errorf("error writing to map string: %w", err)
- }
- for i, entry := range argPairs {
- _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value))
- if err != nil {
- return "", fmt.Errorf("error writing to map string: %w", err)
- }
- if i < len(argPairs)-1 {
- _, err = mapStrBuilder.WriteString(", ")
- if err != nil {
- return "", fmt.Errorf("error writing to map string: %w", err)
- }
- }
- }
- _, err = mapStrBuilder.WriteRune('}')
- if err != nil {
- return "", fmt.Errorf("error writing to map string: %w", err)
- }
- return mapStrBuilder.String(), nil
-}
-
-// quoteForCEL takes a formatted, unquoted value and quotes it in a manner suitable
-// for embedding directly in CEL.
-func quoteForCEL(refVal ref.Val, unquotedValue string) string {
- switch refVal.Type() {
- case types.StringType:
- return fmt.Sprintf("%q", unquotedValue)
- case types.BytesType:
- return fmt.Sprintf("b%q", unquotedValue)
- case types.DoubleType:
- // special case to handle infinity/NaN
- num := refVal.Value().(float64)
- if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) {
- return fmt.Sprintf("%q", unquotedValue)
- }
- return unquotedValue
- default:
- return unquotedValue
- }
-}
-
-// FormatString returns the string representation of a CEL value.
-//
-// It is used to implement the %s specifier in the (string).format() extension function.
-func FormatString(arg ref.Val, locale string) (string, error) {
- switch arg.Type() {
- case types.ListType:
- return formatList(arg, locale)
- case types.MapType:
- return formatMap(arg, locale)
- case types.IntType, types.UintType, types.DoubleType,
- types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType:
- argStrVal := arg.ConvertToType(types.StringType)
- argStr, ok := argStrVal.Value().(string)
- if !ok {
- return "", fmt.Errorf("could not convert argument %q to string", argStrVal)
- }
- return argStr, nil
- case types.NullType:
- return "null", nil
- default:
- return "", stringFormatError(runtimeID, arg.Type().TypeName())
- }
-}
-
-func formatDecimal(arg ref.Val, locale string) (string, error) {
- switch arg.Type() {
- case types.IntType:
- argInt, ok := arg.ConvertToType(types.IntType).Value().(int64)
- if !ok {
- return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
- }
- return fmt.Sprintf("%d", argInt), nil
- case types.UintType:
- argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64)
- if !ok {
- return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
- }
- return fmt.Sprintf("%d", argInt), nil
- default:
- return "", decimalFormatError(runtimeID, arg.Type().TypeName())
- }
-}
-
-func matchLanguage(locale string) (language.Tag, error) {
- matcher, err := makeMatcher(locale)
- if err != nil {
- return language.Und, err
- }
- tag, _ := language.MatchStrings(matcher, locale)
- return tag, nil
-}
-
-func makeMatcher(locale string) (language.Matcher, error) {
- tags := make([]language.Tag, 0)
- tag, err := language.Parse(locale)
- if err != nil {
- return nil, err
- }
- tags = append(tags, tag)
- return language.NewMatcher(tags), nil
-}
-
-type stringFormatter struct{}
-
-func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) {
- return FormatString(arg, locale)
-}
-
-func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) {
- return formatDecimal(arg, locale)
-}
-
-func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) {
- if precision == nil {
- precision = new(int)
- *precision = defaultPrecision
- }
- return func(arg ref.Val, locale string) (string, error) {
- strException := false
- if arg.Type() == types.StringType {
- argStr := arg.Value().(string)
- if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
- strException = true
- }
- }
- if arg.Type() != types.DoubleType && !strException {
- return "", fixedPointFormatError(runtimeID, arg.Type().TypeName())
- }
- argFloatVal := arg.ConvertToType(types.DoubleType)
- argFloat, ok := argFloatVal.Value().(float64)
- if !ok {
- return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
- }
- fmtStr := fmt.Sprintf("%%.%df", *precision)
-
- matchedLocale, err := matchLanguage(locale)
- if err != nil {
- return "", fmt.Errorf("error matching locale: %w", err)
- }
- return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
- }
-}
-
-func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) {
- if precision == nil {
- precision = new(int)
- *precision = defaultPrecision
- }
- return func(arg ref.Val, locale string) (string, error) {
- strException := false
- if arg.Type() == types.StringType {
- argStr := arg.Value().(string)
- if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
- strException = true
- }
- }
- if arg.Type() != types.DoubleType && !strException {
- return "", scientificFormatError(runtimeID, arg.Type().TypeName())
- }
- argFloatVal := arg.ConvertToType(types.DoubleType)
- argFloat, ok := argFloatVal.Value().(float64)
- if !ok {
- return "", fmt.Errorf("could not convert \"%v\" to float64", argFloatVal.Value())
- }
- matchedLocale, err := matchLanguage(locale)
- if err != nil {
- return "", fmt.Errorf("error matching locale: %w", err)
- }
- fmtStr := fmt.Sprintf("%%%de", *precision)
- return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
- }
-}
-
-func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) {
- switch arg.Type() {
- case types.IntType:
- argInt := arg.Value().(int64)
- // locale is intentionally unused as integers formatted as binary
- // strings are locale-independent
- return fmt.Sprintf("%b", argInt), nil
- case types.UintType:
- argInt := arg.Value().(uint64)
- return fmt.Sprintf("%b", argInt), nil
- case types.BoolType:
- argBool := arg.Value().(bool)
- if argBool {
- return "1", nil
- }
- return "0", nil
- default:
- return "", binaryFormatError(runtimeID, arg.Type().TypeName())
- }
-}
-
-func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) {
- return func(arg ref.Val, locale string) (string, error) {
- fmtStr := "%x"
- if useUpper {
- fmtStr = "%X"
- }
- switch arg.Type() {
- case types.StringType, types.BytesType:
- if arg.Type() == types.BytesType {
- return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil
- }
- return fmt.Sprintf(fmtStr, arg.Value().(string)), nil
- case types.IntType:
- argInt, ok := arg.Value().(int64)
- if !ok {
- return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
- }
- return fmt.Sprintf(fmtStr, argInt), nil
- case types.UintType:
- argInt, ok := arg.Value().(uint64)
- if !ok {
- return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
- }
- return fmt.Sprintf(fmtStr, argInt), nil
- default:
- return "", hexFormatError(runtimeID, arg.Type().TypeName())
- }
- }
-}
-
-func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) {
- switch arg.Type() {
- case types.IntType:
- argInt := arg.Value().(int64)
- return fmt.Sprintf("%o", argInt), nil
- case types.UintType:
- argInt := arg.Value().(uint64)
- return fmt.Sprintf("%o", argInt), nil
- default:
- return "", octalFormatError(runtimeID, arg.Type().TypeName())
- }
-}
-
-// stringFormatValidator implements the cel.ASTValidator interface allowing for static validation
-// of string.format calls.
-type stringFormatValidator struct{}
-
-// Name returns the name of the validator.
-func (stringFormatValidator) Name() string {
- return "cel.lib.ext.validate.functions.string.format"
-}
-
-// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip
-// during homogeneous aggregate literal type-checks.
-func (stringFormatValidator) Configure(config cel.MutableValidatorConfig) error {
- functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string)
- functions = append(functions, "format")
- return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions)
-}
-
-// Validate parses all literal format strings and type checks the format clause against the argument
-// at the corresponding ordinal within the list literal argument to the function, if one is specified.
-func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) {
- root := ast.NavigateAST(a)
- formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a))
- for _, e := range formatCallExprs {
- call := e.AsCall()
- formatStr := call.Target().AsLiteral().Value().(string)
- args := call.Args()[0].AsList().Elements()
- formatCheck := &stringFormatChecker{
- args: args,
- ast: a,
- }
- // use a placeholder locale, since locale doesn't affect syntax
- _, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US")
- if err != nil {
- iss.ReportErrorAtID(getErrorExprID(e.ID(), err), err.Error())
- continue
- }
- seenArgs := formatCheck.argsRequested
- if len(args) > seenArgs {
- iss.ReportErrorAtID(e.ID(),
- "too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(args))
- }
- }
-}
-
-// getErrorExprID determines which list literal argument triggered a type-disagreement for the
-// purposes of more accurate error message reports.
-func getErrorExprID(id int64, err error) int64 {
- fmtErr, ok := err.(formatError)
- if ok {
- return fmtErr.id
- }
- wrapped := errors.Unwrap(err)
- if wrapped != nil {
- return getErrorExprID(id, wrapped)
- }
- return id
-}
-
-// matchConstantFormatStringWithListLiteralArgs matches all valid expression nodes for string
-// format checking.
-func matchConstantFormatStringWithListLiteralArgs(a *ast.AST) ast.ExprMatcher {
- return func(e ast.NavigableExpr) bool {
- if e.Kind() != ast.CallKind {
- return false
- }
- call := e.AsCall()
- if !call.IsMemberFunction() || call.FunctionName() != "format" {
- return false
- }
- overloadIDs := a.GetOverloadIDs(e.ID())
- if len(overloadIDs) != 0 {
- found := false
- for _, overload := range overloadIDs {
- if overload == overloads.ExtFormatString {
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
- formatString := call.Target()
- if formatString.Kind() != ast.LiteralKind && formatString.AsLiteral().Type() != cel.StringType {
- return false
- }
- args := call.Args()
- if len(args) != 1 {
- return false
- }
- formatArgs := args[0]
- return formatArgs.Kind() == ast.ListKind
- }
-}
-
-// stringFormatChecker implements the formatStringInterpolater interface
-type stringFormatChecker struct {
- args []ast.Expr
- argsRequested int
- currArgIndex int64
- ast *ast.AST
-}
-
-func (c *stringFormatChecker) String(arg ref.Val, locale string) (string, error) {
- formatArg := c.args[c.currArgIndex]
- valid, badID := c.verifyString(formatArg)
- if !valid {
- return "", stringFormatError(badID, c.typeOf(badID).TypeName())
- }
- return "", nil
-}
-
-func (c *stringFormatChecker) Decimal(arg ref.Val, locale string) (string, error) {
- id := c.args[c.currArgIndex].ID()
- valid := c.verifyTypeOneOf(id, types.IntType, types.UintType)
- if !valid {
- return "", decimalFormatError(id, c.typeOf(id).TypeName())
- }
- return "", nil
-}
-
-func (c *stringFormatChecker) Fixed(precision *int) func(ref.Val, string) (string, error) {
- return func(arg ref.Val, locale string) (string, error) {
- id := c.args[c.currArgIndex].ID()
- // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values
- valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType)
- if !valid {
- return "", fixedPointFormatError(id, c.typeOf(id).TypeName())
- }
- return "", nil
- }
-}
-
-func (c *stringFormatChecker) Scientific(precision *int) func(ref.Val, string) (string, error) {
- return func(arg ref.Val, locale string) (string, error) {
- id := c.args[c.currArgIndex].ID()
- valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType)
- if !valid {
- return "", scientificFormatError(id, c.typeOf(id).TypeName())
- }
- return "", nil
- }
-}
-
-func (c *stringFormatChecker) Binary(arg ref.Val, locale string) (string, error) {
- id := c.args[c.currArgIndex].ID()
- valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.BoolType)
- if !valid {
- return "", binaryFormatError(id, c.typeOf(id).TypeName())
- }
- return "", nil
-}
-
-func (c *stringFormatChecker) Hex(useUpper bool) func(ref.Val, string) (string, error) {
- return func(arg ref.Val, locale string) (string, error) {
- id := c.args[c.currArgIndex].ID()
- valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.StringType, types.BytesType)
- if !valid {
- return "", hexFormatError(id, c.typeOf(id).TypeName())
- }
- return "", nil
- }
-}
-
-func (c *stringFormatChecker) Octal(arg ref.Val, locale string) (string, error) {
- id := c.args[c.currArgIndex].ID()
- valid := c.verifyTypeOneOf(id, types.IntType, types.UintType)
- if !valid {
- return "", octalFormatError(id, c.typeOf(id).TypeName())
- }
- return "", nil
-}
-
-func (c *stringFormatChecker) Arg(index int64) (ref.Val, error) {
- c.argsRequested++
- c.currArgIndex = index
- // return a dummy value - this is immediately passed to back to us
- // through one of the FormatCallback functions, so anything will do
- return types.Int(0), nil
-}
-
-func (c *stringFormatChecker) Size() int64 {
- return int64(len(c.args))
-}
-
-func (c *stringFormatChecker) typeOf(id int64) *cel.Type {
- return c.ast.GetType(id)
-}
-
-func (c *stringFormatChecker) verifyTypeOneOf(id int64, validTypes ...*cel.Type) bool {
- t := c.typeOf(id)
- if t == cel.DynType {
- return true
- }
- for _, vt := range validTypes {
- // Only check runtime type compatibility without delving deeper into parameterized types
- if t.Kind() == vt.Kind() {
- return true
- }
- }
- return false
-}
-
-func (c *stringFormatChecker) verifyString(sub ast.Expr) (bool, int64) {
- paramA := cel.TypeParamType("A")
- paramB := cel.TypeParamType("B")
- subVerified := c.verifyTypeOneOf(sub.ID(),
- cel.ListType(paramA), cel.MapType(paramA, paramB),
- cel.IntType, cel.UintType, cel.DoubleType, cel.BoolType, cel.StringType,
- cel.TimestampType, cel.BytesType, cel.DurationType, cel.TypeType, cel.NullType)
- if !subVerified {
- return false, sub.ID()
- }
- switch sub.Kind() {
- case ast.ListKind:
- for _, e := range sub.AsList().Elements() {
- // recursively verify if we're dealing with a list/map
- verified, id := c.verifyString(e)
- if !verified {
- return false, id
- }
- }
- return true, sub.ID()
- case ast.MapKind:
- for _, e := range sub.AsMap().Entries() {
- // recursively verify if we're dealing with a list/map
- entry := e.AsMapEntry()
- verified, id := c.verifyString(entry.Key())
- if !verified {
- return false, id
- }
- verified, id = c.verifyString(entry.Value())
- if !verified {
- return false, id
- }
- }
- return true, sub.ID()
- default:
- return true, sub.ID()
- }
-}
-
-// helper routines for reporting common errors during string formatting static validation and
-// runtime execution.
-
-func binaryFormatError(id int64, badType string) error {
- return newFormatError(id, "only integers and bools can be formatted as binary, was given %s", badType)
-}
-
-func decimalFormatError(id int64, badType string) error {
- return newFormatError(id, "decimal clause can only be used on integers, was given %s", badType)
-}
-
-func fixedPointFormatError(id int64, badType string) error {
- return newFormatError(id, "fixed-point clause can only be used on doubles, was given %s", badType)
-}
-
-func hexFormatError(id int64, badType string) error {
- return newFormatError(id, "only integers, byte buffers, and strings can be formatted as hex, was given %s", badType)
-}
-
-func octalFormatError(id int64, badType string) error {
- return newFormatError(id, "octal clause can only be used on integers, was given %s", badType)
-}
-
-func scientificFormatError(id int64, badType string) error {
- return newFormatError(id, "scientific clause can only be used on doubles, was given %s", badType)
-}
-
-func stringFormatError(id int64, badType string) error {
- return newFormatError(id, "string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", badType)
-}
-
-type formatError struct {
- id int64
- msg string
-}
-
-func newFormatError(id int64, msg string, args ...any) error {
- return formatError{
- id: id,
- msg: fmt.Sprintf(msg, args...),
- }
-}
-
-func (e formatError) Error() string {
- return e.msg
-}
-
-func (e formatError) Is(target error) bool {
- return e.msg == target.Error()
-}
-
-// stringArgList implements the formatListArgs interface.
-type stringArgList struct {
- args traits.Lister
-}
-
-func (c *stringArgList) Arg(index int64) (ref.Val, error) {
- if index >= c.args.Size().Value().(int64) {
- return nil, fmt.Errorf("index %d out of range", index)
- }
- return c.args.Get(types.Int(index)), nil
-}
-
-func (c *stringArgList) Size() int64 {
- return c.args.Size().Value().(int64)
-}
-
-// formatStringInterpolator is an interface that allows user-defined behavior
-// for formatting clause implementations, as well as argument retrieval.
-// Each function is expected to support the appropriate types as laid out in
-// the string.format documentation, and to return an error if given an inappropriate type.
-type formatStringInterpolator interface {
- // String takes a ref.Val and a string representing the current locale identifier
- // and returns the Val formatted as a string, or an error if one occurred.
- String(ref.Val, string) (string, error)
-
- // Decimal takes a ref.Val and a string representing the current locale identifier
- // and returns the Val formatted as a decimal integer, or an error if one occurred.
- Decimal(ref.Val, string) (string, error)
-
- // Fixed takes an int pointer representing precision (or nil if none was given) and
- // returns a function operating in a similar manner to String and Decimal, taking a
- // ref.Val and locale and returning the appropriate string. A closure is returned
- // so precision can be set without needing an additional function call/configuration.
- Fixed(*int) func(ref.Val, string) (string, error)
-
- // Scientific functions identically to Fixed, except the string returned from the closure
- // is expected to be in scientific notation.
- Scientific(*int) func(ref.Val, string) (string, error)
-
- // Binary takes a ref.Val and a string representing the current locale identifier
- // and returns the Val formatted as a binary integer, or an error if one occurred.
- Binary(ref.Val, string) (string, error)
-
- // Hex takes a boolean that, if true, indicates the hex string output by the returned
- // closure should use uppercase letters for A-F.
- Hex(bool) func(ref.Val, string) (string, error)
-
- // Octal takes a ref.Val and a string representing the current locale identifier and
- // returns the Val formatted in octal, or an error if one occurred.
- Octal(ref.Val, string) (string, error)
-}
-
-// formatListArgs is an interface that allows user-defined list-like datatypes to be used
-// for formatting clause implementations.
-type formatListArgs interface {
- // Arg returns the ref.Val at the given index, or an error if one occurred.
- Arg(int64) (ref.Val, error)
-
- // Size returns the length of the argument list.
- Size() int64
-}
-
-// parseFormatString formats a string according to the string.format syntax, taking the clause implementations
-// from the provided FormatCallback and the args from the given FormatList.
-func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string) (string, error) {
- i := 0
- argIndex := 0
- var builtStr strings.Builder
- for i < len(formatStr) {
- if formatStr[i] == '%' {
- if i+1 < len(formatStr) && formatStr[i+1] == '%' {
- err := builtStr.WriteByte('%')
- if err != nil {
- return "", fmt.Errorf("error writing format string: %w", err)
- }
- i += 2
- continue
- } else {
- argAny, err := list.Arg(int64(argIndex))
- if err != nil {
- return "", err
- }
- if i+1 >= len(formatStr) {
- return "", errors.New("unexpected end of string")
- }
- if int64(argIndex) >= list.Size() {
- return "", fmt.Errorf("index %d out of range", argIndex)
- }
- numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale)
- if refErr != nil {
- return "", refErr
- }
- _, err = builtStr.WriteString(val)
- if err != nil {
- return "", fmt.Errorf("error writing format string: %w", err)
- }
- i += numRead
- argIndex++
- }
- } else {
- err := builtStr.WriteByte(formatStr[i])
- if err != nil {
- return "", fmt.Errorf("error writing format string: %w", err)
- }
- i++
- }
- }
- return builtStr.String(), nil
-}
-
-// parseAndFormatClause parses the format clause at the start of the given string with val, and returns
-// how many characters were consumed and the substituted string form of val, or an error if one occurred.
-func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string) (int, string, error) {
- i := 1
- read, formatter, err := parseFormattingClause(formatStr[i:], callback)
- i += read
- if err != nil {
- return -1, "", newParseFormatError("could not parse formatting clause", err)
- }
-
- valStr, err := formatter(val, locale)
- if err != nil {
- return -1, "", newParseFormatError("error during formatting", err)
- }
- return i, valStr, nil
-}
-
-func parseFormattingClause(formatStr string, callback formatStringInterpolator) (int, clauseImpl, error) {
- i := 0
- read, precision, err := parsePrecision(formatStr[i:])
- i += read
- if err != nil {
- return -1, nil, fmt.Errorf("error while parsing precision: %w", err)
- }
- r := rune(formatStr[i])
- i++
- switch r {
- case 's':
- return i, callback.String, nil
- case 'd':
- return i, callback.Decimal, nil
- case 'f':
- return i, callback.Fixed(precision), nil
- case 'e':
- return i, callback.Scientific(precision), nil
- case 'b':
- return i, callback.Binary, nil
- case 'x', 'X':
- return i, callback.Hex(unicode.IsUpper(r)), nil
- case 'o':
- return i, callback.Octal, nil
- default:
- return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r)
- }
-}
-
-func parsePrecision(formatStr string) (int, *int, error) {
- i := 0
- if formatStr[i] != '.' {
- return i, nil, nil
- }
- i++
- var buffer strings.Builder
- for {
- if i >= len(formatStr) {
- return -1, nil, errors.New("could not find end of precision specifier")
- }
- if !isASCIIDigit(rune(formatStr[i])) {
- break
- }
- buffer.WriteByte(formatStr[i])
- i++
- }
- precision, err := strconv.Atoi(buffer.String())
- if err != nil {
- return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err)
- }
- return i, &precision, nil
-}
-
-func isASCIIDigit(r rune) bool {
- return r <= unicode.MaxASCII && unicode.IsDigit(r)
-}
-
-type parseFormatError struct {
- msg string
- wrapped error
-}
-
-func newParseFormatError(msg string, wrapped error) error {
- return parseFormatError{msg: msg, wrapped: wrapped}
-}
-
-func (e parseFormatError) Error() string {
- return fmt.Sprintf("%s: %s", e.msg, e.wrapped.Error())
-}
-
-func (e parseFormatError) Is(target error) bool {
- return e.Error() == target.Error()
-}
-
-func (e parseFormatError) Unwrap() error {
- return e.wrapped
-}
-
-const (
- runtimeID = int64(-1)
-)
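
Deleting ext/formatting.go does not remove the user-facing string.format() function in
v0.17.7: parsing moves back to interpreter.ParseFormatString (see the strings.go hunk
further down) and the clause helpers move into strings.go itself. A usage sketch,
assuming the Strings extension is enabled; it is not part of the patch:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Strings())
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`"%s scored %.1f".format(["alice", 9.5])`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // alice scored 9.5
    }
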
diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go
index 2c00bfe3a..785c8675b 100644
--- a/vendor/github.com/google/cel-go/ext/guards.go
+++ b/vendor/github.com/google/cel-go/ext/guards.go
@@ -15,9 +15,10 @@
package ext
import (
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// function invocation guards for common call signatures within extension functions.
@@ -50,10 +51,10 @@ func listStringOrError(strs []string, err error) ref.Val {
return types.DefaultTypeAdapter.NativeToValue(strs)
}
-func macroTargetMatchesNamespace(ns string, target ast.Expr) bool {
- switch target.Kind() {
- case ast.IdentKind:
- if target.AsIdent() != ns {
+func macroTargetMatchesNamespace(ns string, target *exprpb.Expr) bool {
+ switch target.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ if target.GetIdentExpr().GetName() != ns {
return false
}
return true
diff --git a/vendor/github.com/google/cel-go/ext/math.go b/vendor/github.com/google/cel-go/ext/math.go
index 65d7e2eb0..0b9a36103 100644
--- a/vendor/github.com/google/cel-go/ext/math.go
+++ b/vendor/github.com/google/cel-go/ext/math.go
@@ -19,10 +19,11 @@ import (
"strings"
"github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Math returns a cel.EnvOption to configure namespaced math helper macros and
@@ -110,9 +111,9 @@ func (mathLib) CompileOptions() []cel.EnvOption {
return []cel.EnvOption{
cel.Macros(
// math.least(num, ...)
- cel.ReceiverVarArgMacro(leastMacro, mathLeast),
+ cel.NewReceiverVarArgMacro(leastMacro, mathLeast),
// math.greatest(num, ...)
- cel.ReceiverVarArgMacro(greatestMacro, mathGreatest),
+ cel.NewReceiverVarArgMacro(greatestMacro, mathGreatest),
),
cel.Function(minFunc,
cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
@@ -186,57 +187,57 @@ func (mathLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
-func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(mathNamespace, target) {
return nil, nil
}
switch len(args) {
case 0:
- return nil, meh.NewError(target.ID(), "math.least() requires at least one argument")
+ return nil, meh.NewError(target.GetId(), "math.least() requires at least one argument")
case 1:
if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
- return meh.NewCall(minFunc, args[0]), nil
+ return meh.GlobalCall(minFunc, args[0]), nil
}
- return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value")
+ return nil, meh.NewError(args[0].GetId(), "math.least() invalid single argument value")
case 2:
err := checkInvalidArgs(meh, "math.least()", args)
if err != nil {
return nil, err
}
- return meh.NewCall(minFunc, args...), nil
+ return meh.GlobalCall(minFunc, args...), nil
default:
err := checkInvalidArgs(meh, "math.least()", args)
if err != nil {
return nil, err
}
- return meh.NewCall(minFunc, meh.NewList(args...)), nil
+ return meh.GlobalCall(minFunc, meh.NewList(args...)), nil
}
}
-func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(mathNamespace, target) {
return nil, nil
}
switch len(args) {
case 0:
- return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument")
+ return nil, meh.NewError(target.GetId(), "math.greatest() requires at least one argument")
case 1:
if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
- return mef.NewCall(maxFunc, args[0]), nil
+ return meh.GlobalCall(maxFunc, args[0]), nil
}
- return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value")
+ return nil, meh.NewError(args[0].GetId(), "math.greatest() invalid single argument value")
case 2:
- err := checkInvalidArgs(mef, "math.greatest()", args)
+ err := checkInvalidArgs(meh, "math.greatest()", args)
if err != nil {
return nil, err
}
- return mef.NewCall(maxFunc, args...), nil
+ return meh.GlobalCall(maxFunc, args...), nil
default:
- err := checkInvalidArgs(mef, "math.greatest()", args)
+ err := checkInvalidArgs(meh, "math.greatest()", args)
if err != nil {
return nil, err
}
- return mef.NewCall(maxFunc, mef.NewList(args...)), nil
+ return meh.GlobalCall(maxFunc, meh.NewList(args...)), nil
}
}
@@ -310,48 +311,48 @@ func maxList(numList ref.Val) ref.Val {
}
}
-func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr) *cel.Error {
+func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *cel.Error {
for _, arg := range args {
err := checkInvalidArgLiteral(funcName, arg)
if err != nil {
- return meh.NewError(arg.ID(), err.Error())
+ return meh.NewError(arg.GetId(), err.Error())
}
}
return nil
}
-func checkInvalidArgLiteral(funcName string, arg ast.Expr) error {
+func checkInvalidArgLiteral(funcName string, arg *exprpb.Expr) error {
if !isValidArgType(arg) {
return fmt.Errorf("%s simple literal arguments must be numeric", funcName)
}
return nil
}
-func isValidArgType(arg ast.Expr) bool {
- switch arg.Kind() {
- case ast.LiteralKind:
- c := ref.Val(arg.AsLiteral())
- switch c.(type) {
- case types.Double, types.Int, types.Uint:
+func isValidArgType(arg *exprpb.Expr) bool {
+ switch arg.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ c := arg.GetConstExpr()
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_DoubleValue, *exprpb.Constant_Int64Value, *exprpb.Constant_Uint64Value:
return true
default:
return false
}
- case ast.ListKind, ast.MapKind, ast.StructKind:
+ case *exprpb.Expr_ListExpr, *exprpb.Expr_StructExpr:
return false
default:
return true
}
}
-func isListLiteralWithValidArgs(arg ast.Expr) bool {
- switch arg.Kind() {
- case ast.ListKind:
- list := arg.AsList()
- if list.Size() == 0 {
+func isListLiteralWithValidArgs(arg *exprpb.Expr) bool {
+ switch arg.GetExprKind().(type) {
+ case *exprpb.Expr_ListExpr:
+ list := arg.GetListExpr()
+ if len(list.GetElements()) == 0 {
return false
}
- for _, e := range list.Elements() {
+ for _, e := range list.GetElements() {
if !isValidArgType(e) {
return false
}
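
math.least() and math.greatest() keep the same surface syntax across the macro-helper
change above; only the expansion calls (GlobalCall vs NewCall) differ. A quick usage
sketch, assumed setup and not part of the patch:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Math())
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`math.greatest(1, 3, 2) + math.least(7, 5)`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // 8
    }
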
diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go
index 0c2cd52f9..0b5fc38ca 100644
--- a/vendor/github.com/google/cel-go/ext/native.go
+++ b/vendor/github.com/google/cel-go/ext/native.go
@@ -151,24 +151,6 @@ func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool
return tp.baseProvider.FindStructType(typeName)
}
-// FindStructFieldNames looks up the type definition first from the native types, then from
-// the backing provider type set. If found, a set of field names corresponding to the type
-// will be returned.
-func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
- if t, found := tp.nativeTypes[typeName]; found {
- fieldCount := t.refType.NumField()
- fields := make([]string, fieldCount)
- for i := 0; i < fieldCount; i++ {
- fields[i] = t.refType.Field(i).Name
- }
- return fields, true
- }
- if celTypeFields, found := tp.baseProvider.FindStructFieldNames(typeName); found {
- return celTypeFields, true
- }
- return tp.baseProvider.FindStructFieldNames(typeName)
-}
-
// FindStructFieldType looks up a native type's field definition, and if the type name is not a native
// type then proxies to the composed types.Provider
func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) {
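
As with the registry, dropping FindStructFieldNames from the native type provider leaves
per-field resolution intact, so CEL programs over plain Go structs are unaffected. A
sketch of registering a native type; the Point struct and its "main.Point" type name are
illustrative assumptions, not part of the patch:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    // Point is a plain Go struct exposed to CEL through ext.NativeTypes.
    type Point struct {
        X float64
        Y float64
    }

    func main() {
        env, err := cel.NewEnv(
            ext.NativeTypes(reflect.TypeOf(Point{})),
            cel.Variable("p", cel.ObjectType("main.Point")),
        )
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`p.X + p.Y`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{"p": Point{X: 1.5, Y: 2.5}})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // 4
    }
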
diff --git a/vendor/github.com/google/cel-go/ext/protos.go b/vendor/github.com/google/cel-go/ext/protos.go
index 68796f60a..a7ca27a6a 100644
--- a/vendor/github.com/google/cel-go/ext/protos.go
+++ b/vendor/github.com/google/cel-go/ext/protos.go
@@ -16,7 +16,8 @@ package ext
import (
"github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/ast"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Protos returns a cel.EnvOption to configure extended macros and functions for
@@ -71,9 +72,9 @@ func (protoLib) CompileOptions() []cel.EnvOption {
return []cel.EnvOption{
cel.Macros(
// proto.getExt(msg, select_expression)
- cel.ReceiverMacro(getExtension, 2, getProtoExt),
+ cel.NewReceiverMacro(getExtension, 2, getProtoExt),
// proto.hasExt(msg, select_expression)
- cel.ReceiverMacro(hasExtension, 2, hasProtoExt),
+ cel.NewReceiverMacro(hasExtension, 2, hasProtoExt),
),
}
}
@@ -84,56 +85,56 @@ func (protoLib) ProgramOptions() []cel.ProgramOption {
}
// hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message.
-func hasProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(protoNamespace, target) {
return nil, nil
}
- extensionField, err := getExtFieldName(mef, args[1])
+ extensionField, err := getExtFieldName(meh, args[1])
if err != nil {
return nil, err
}
- return mef.NewPresenceTest(args[0], extensionField), nil
+ return meh.PresenceTest(args[0], extensionField), nil
}
// getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message.
-func getProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(protoNamespace, target) {
return nil, nil
}
- extFieldName, err := getExtFieldName(mef, args[1])
+ extFieldName, err := getExtFieldName(meh, args[1])
if err != nil {
return nil, err
}
- return mef.NewSelect(args[0], extFieldName), nil
+ return meh.Select(args[0], extFieldName), nil
}
-func getExtFieldName(mef cel.MacroExprFactory, expr ast.Expr) (string, *cel.Error) {
+func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.Error) {
isValid := false
extensionField := ""
- switch expr.Kind() {
- case ast.SelectKind:
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
extensionField, isValid = validateIdentifier(expr)
}
if !isValid {
- return "", mef.NewError(expr.ID(), "invalid extension field")
+ return "", meh.NewError(expr.GetId(), "invalid extension field")
}
return extensionField, nil
}
-func validateIdentifier(expr ast.Expr) (string, bool) {
- switch expr.Kind() {
- case ast.IdentKind:
- return expr.AsIdent(), true
- case ast.SelectKind:
- sel := expr.AsSelect()
- if sel.IsTestOnly() {
+func validateIdentifier(expr *exprpb.Expr) (string, bool) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ return expr.GetIdentExpr().GetName(), true
+ case *exprpb.Expr_SelectExpr:
+ sel := expr.GetSelectExpr()
+ if sel.GetTestOnly() {
return "", false
}
- opStr, isIdent := validateIdentifier(sel.Operand())
+ opStr, isIdent := validateIdentifier(sel.GetOperand())
if !isIdent {
return "", false
}
- return opStr + "." + sel.FieldName(), true
+ return opStr + "." + sel.GetField(), true
default:
return "", false
}
diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go
index 1faa6ed7d..88c119f2b 100644
--- a/vendor/github.com/google/cel-go/ext/strings.go
+++ b/vendor/github.com/google/cel-go/ext/strings.go
@@ -21,16 +21,19 @@ import (
"fmt"
"math"
"reflect"
+ "sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/text/language"
+ "golang.org/x/text/message"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
)
const (
@@ -202,8 +205,6 @@ const (
// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
-// 'hello hello'.replace('', '_') // returns '_h_e_l_l_o_ _h_e_l_l_o_'
-// 'hello hello'.replace('h', '') // returns 'ello ello'
//
// # Split
//
@@ -269,26 +270,8 @@ const (
//
// 'TacoCat'.upperAscii() // returns 'TACOCAT'
// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
-//
-// # Reverse
-//
-// Introduced at version: 3
-//
-// Returns a new string whose characters are the same as the target string, only formatted in
-// reverse order.
-// This function relies on converting strings to rune arrays in order to reverse
-//
-// <string>.reverse() -> <string>
-//
-// Examples:
-//
-// 'gums'.reverse() // returns 'smug'
-// 'John Smith'.reverse() // returns 'htimS nhoJ'
func Strings(options ...StringsOption) cel.EnvOption {
- s := &stringLib{
- version: math.MaxUint32,
- validateFormat: true,
- }
+ s := &stringLib{version: math.MaxUint32}
for _, o := range options {
s = o(s)
}
@@ -296,9 +279,8 @@ func Strings(options ...StringsOption) cel.EnvOption {
}
type stringLib struct {
- locale string
- version uint32
- validateFormat bool
+ locale string
+ version uint32
}
// LibraryName implements the SingletonLibrary interface method.
@@ -335,17 +317,6 @@ func StringsVersion(version uint32) StringsOption {
}
}
-// StringsValidateFormatCalls validates type-checked ASTs to ensure that string.format() calls have
-// valid formatting clauses and valid argument types for each clause.
-//
-// Enabled by default.
-func StringsValidateFormatCalls(value bool) StringsOption {
- return func(s *stringLib) *stringLib {
- s.validateFormat = value
- return s
- }
-}
-
// CompileOptions implements the Library interface method.
func (lib *stringLib) CompileOptions() []cel.EnvOption {
formatLocale := "en_US"
@@ -469,15 +440,13 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption {
cel.FunctionBinding(func(args ...ref.Val) ref.Val {
s := string(args[0].(types.String))
formatArgs := args[1].(traits.Lister)
- return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale))
+ return stringOrError(interpreter.ParseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale))
}))),
cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType,
cel.UnaryBinding(func(str ref.Val) ref.Val {
s := str.(types.String)
return stringOrError(quote(string(s)))
- }))),
-
- cel.ASTValidators(stringFormatValidator{}))
+ }))))
}
if lib.version >= 2 {
@@ -502,7 +471,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption {
cel.UnaryBinding(func(list ref.Val) ref.Val {
l, err := list.ConvertToNative(stringListType)
if err != nil {
- return types.WrapErr(err)
+ return types.NewErr(err.Error())
}
return stringOrError(join(l.([]string)))
})),
@@ -510,26 +479,13 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption {
cel.BinaryBinding(func(list, delim ref.Val) ref.Val {
l, err := list.ConvertToNative(stringListType)
if err != nil {
- return types.WrapErr(err)
+ return types.NewErr(err.Error())
}
d := delim.(types.String)
return stringOrError(joinSeparator(l.([]string), string(d)))
}))),
)
}
- if lib.version >= 3 {
- opts = append(opts,
- cel.Function("reverse",
- cel.MemberOverload("reverse", []*cel.Type{cel.StringType}, cel.StringType,
- cel.UnaryBinding(func(str ref.Val) ref.Val {
- s := str.(types.String)
- return stringOrError(reverse(string(s)))
- }))),
- )
- }
- if lib.validateFormat {
- opts = append(opts, cel.ASTValidators(stringFormatValidator{}))
- }
return opts
}
@@ -680,14 +636,6 @@ func upperASCII(str string) (string, error) {
return string(runes), nil
}
-func reverse(str string) (string, error) {
- chars := []rune(str)
- for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 {
- chars[i], chars[j] = chars[j], chars[i]
- }
- return string(chars), nil
-}
-
func joinSeparator(strs []string, separator string) (string, error) {
return strings.Join(strs, separator), nil
}
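
Because reverse() only exists from strings-library version 3 onward, expressions compiled
against this vendored v0.17.7 build cannot use it; pinning the library version makes that
explicit at environment construction time. A sketch, assumed setup and not part of the
patch:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        // Pin the strings extension to version 2: everything up to join(),
        // none of the later additions such as reverse().
        env, err := cel.NewEnv(ext.Strings(ext.StringsVersion(2)))
        if err != nil {
            panic(err)
        }
        if _, iss := env.Compile(`'gums'.reverse()`); iss.Err() != nil {
            fmt.Println("reverse() is not declared in this environment")
        }
    }
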
@@ -713,6 +661,238 @@ func joinValSeparator(strs traits.Lister, separator string) (string, error) {
return sb.String(), nil
}
+type clauseImpl func(ref.Val, string) (string, error)
+
+func clauseForType(argType ref.Type) (clauseImpl, error) {
+ switch argType {
+ case types.IntType, types.UintType:
+ return formatDecimal, nil
+ case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType:
+ return FormatString, nil
+ case types.TimestampType, types.DurationType:
+ // special case to ensure timestamps/durations get printed as CEL literals
+ return func(arg ref.Val, locale string) (string, error) {
+ argStrVal := arg.ConvertToType(types.StringType)
+ argStr := argStrVal.Value().(string)
+ if arg.Type() == types.TimestampType {
+ return fmt.Sprintf("timestamp(%q)", argStr), nil
+ }
+ if arg.Type() == types.DurationType {
+ return fmt.Sprintf("duration(%q)", argStr), nil
+ }
+ return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName())
+ }, nil
+ case types.ListType:
+ return formatList, nil
+ case types.MapType:
+ return formatMap, nil
+ case types.DoubleType:
+ // avoid formatFixed so we can output a period as the decimal separator in order
+ // to always be a valid CEL literal
+ return func(arg ref.Val, locale string) (string, error) {
+ argDouble, ok := arg.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName())
+ }
+ fmtStr := fmt.Sprintf("%%.%df", defaultPrecision)
+ return fmt.Sprintf(fmtStr, argDouble), nil
+ }, nil
+ case types.TypeType:
+ return func(arg ref.Val, locale string) (string, error) {
+ return fmt.Sprintf("type(%s)", arg.Value().(string)), nil
+ }, nil
+ default:
+ return nil, fmt.Errorf("no formatting function for %s", argType.TypeName())
+ }
+}
+
+func formatList(arg ref.Val, locale string) (string, error) {
+ argList := arg.(traits.Lister)
+ argIterator := argList.Iterator()
+ var listStrBuilder strings.Builder
+ _, err := listStrBuilder.WriteRune('[')
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ for argIterator.HasNext() == types.True {
+ member := argIterator.Next()
+ memberFormat, err := clauseForType(member.Type())
+ if err != nil {
+ return "", err
+ }
+ unquotedStr, err := memberFormat(member, locale)
+ if err != nil {
+ return "", err
+ }
+ str := quoteForCEL(member, unquotedStr)
+ _, err = listStrBuilder.WriteString(str)
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ if argIterator.HasNext() == types.True {
+ _, err = listStrBuilder.WriteString(", ")
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ }
+ }
+ _, err = listStrBuilder.WriteRune(']')
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ return listStrBuilder.String(), nil
+}
+
+func formatMap(arg ref.Val, locale string) (string, error) {
+ argMap := arg.(traits.Mapper)
+ argIterator := argMap.Iterator()
+ type mapPair struct {
+ key string
+ value string
+ }
+ argPairs := make([]mapPair, argMap.Size().Value().(int64))
+ i := 0
+ for argIterator.HasNext() == types.True {
+ key := argIterator.Next()
+ var keyFormat clauseImpl
+ switch key.Type() {
+ case types.StringType, types.BoolType:
+ keyFormat = FormatString
+ case types.IntType, types.UintType:
+ keyFormat = formatDecimal
+ default:
+ return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName())
+ }
+ unquotedKeyStr, err := keyFormat(key, locale)
+ if err != nil {
+ return "", err
+ }
+ keyStr := quoteForCEL(key, unquotedKeyStr)
+ value, found := argMap.Find(key)
+ if !found {
+ return "", fmt.Errorf("could not find key: %q", key)
+ }
+ valueFormat, err := clauseForType(value.Type())
+ if err != nil {
+ return "", err
+ }
+ unquotedValueStr, err := valueFormat(value, locale)
+ if err != nil {
+ return "", err
+ }
+ valueStr := quoteForCEL(value, unquotedValueStr)
+ argPairs[i] = mapPair{keyStr, valueStr}
+ i++
+ }
+ sort.SliceStable(argPairs, func(x, y int) bool {
+ return argPairs[x].key < argPairs[y].key
+ })
+ var mapStrBuilder strings.Builder
+ _, err := mapStrBuilder.WriteRune('{')
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ for i, entry := range argPairs {
+ _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value))
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ if i < len(argPairs)-1 {
+ _, err = mapStrBuilder.WriteString(", ")
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ }
+ }
+ _, err = mapStrBuilder.WriteRune('}')
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ return mapStrBuilder.String(), nil
+}
+
+// quoteForCEL takes a formatted, unquoted value and quotes it in a manner
+// suitable for embedding directly in CEL.
+func quoteForCEL(refVal ref.Val, unquotedValue string) string {
+ switch refVal.Type() {
+ case types.StringType:
+ return fmt.Sprintf("%q", unquotedValue)
+ case types.BytesType:
+ return fmt.Sprintf("b%q", unquotedValue)
+ case types.DoubleType:
+ // special case to handle infinity/NaN
+ num := refVal.Value().(float64)
+ if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) {
+ return fmt.Sprintf("%q", unquotedValue)
+ }
+ return unquotedValue
+ default:
+ return unquotedValue
+ }
+}
+
+// FormatString returns the string representation of a CEL value.
+// It is used to implement the %s specifier in the (string).format() extension
+// function.
+func FormatString(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.ListType:
+ return formatList(arg, locale)
+ case types.MapType:
+ return formatMap(arg, locale)
+ case types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType:
+ argStrVal := arg.ConvertToType(types.StringType)
+ argStr, ok := argStrVal.Value().(string)
+ if !ok {
+ return "", fmt.Errorf("could not convert argument %q to string", argStrVal)
+ }
+ return argStr, nil
+ case types.NullType:
+ return "null", nil
+ default:
+ return "", fmt.Errorf("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", arg.Type().TypeName())
+ }
+}
+
+func formatDecimal(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt, ok := arg.ConvertToType(types.IntType).Value().(int64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
+ }
+ return fmt.Sprintf("%d", argInt), nil
+ case types.UintType:
+ argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
+ }
+ return fmt.Sprintf("%d", argInt), nil
+ default:
+ return "", fmt.Errorf("decimal clause can only be used on integers, was given %s", arg.Type().TypeName())
+ }
+}
+
+func matchLanguage(locale string) (language.Tag, error) {
+ matcher, err := makeMatcher(locale)
+ if err != nil {
+ return language.Und, err
+ }
+ tag, _ := language.MatchStrings(matcher, locale)
+ return tag, nil
+}
+
+func makeMatcher(locale string) (language.Matcher, error) {
+ tags := make([]language.Tag, 0)
+ tag, err := language.Parse(locale)
+ if err != nil {
+ return nil, err
+ }
+ tags = append(tags, tag)
+ return language.NewMatcher(tags), nil
+}
+
// quote implements a string quoting function. The string will be wrapped in
// double quotes, and all valid CEL escape sequences will be escaped to show up
// literally if printed. If the input contains any invalid UTF-8, the invalid runes
@@ -760,6 +940,156 @@ func sanitize(s string) string {
return sanitizedStringBuilder.String()
}
+type stringFormatter struct{}
+
+func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) {
+ return FormatString(arg, locale)
+}
+
+func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) {
+ return formatDecimal(arg, locale)
+}
+
+func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ if precision == nil {
+ precision = new(int)
+ *precision = defaultPrecision
+ }
+ return func(arg ref.Val, locale string) (string, error) {
+ strException := false
+ if arg.Type() == types.StringType {
+ argStr := arg.Value().(string)
+ if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
+ strException = true
+ }
+ }
+ if arg.Type() != types.DoubleType && !strException {
+ return "", fmt.Errorf("fixed-point clause can only be used on doubles, was given %s", arg.Type().TypeName())
+ }
+ argFloatVal := arg.ConvertToType(types.DoubleType)
+ argFloat, ok := argFloatVal.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
+ }
+ fmtStr := fmt.Sprintf("%%.%df", *precision)
+
+ matchedLocale, err := matchLanguage(locale)
+ if err != nil {
+ return "", fmt.Errorf("error matching locale: %w", err)
+ }
+ return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
+ }
+}
+
+func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ if precision == nil {
+ precision = new(int)
+ *precision = defaultPrecision
+ }
+ return func(arg ref.Val, locale string) (string, error) {
+ strException := false
+ if arg.Type() == types.StringType {
+ argStr := arg.Value().(string)
+ if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
+ strException = true
+ }
+ }
+ if arg.Type() != types.DoubleType && !strException {
+ return "", fmt.Errorf("scientific clause can only be used on doubles, was given %s", arg.Type().TypeName())
+ }
+ argFloatVal := arg.ConvertToType(types.DoubleType)
+ argFloat, ok := argFloatVal.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
+ }
+ matchedLocale, err := matchLanguage(locale)
+ if err != nil {
+ return "", fmt.Errorf("error matching locale: %w", err)
+ }
+ fmtStr := fmt.Sprintf("%%%de", *precision)
+ return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
+ }
+}
+
+func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt := arg.Value().(int64)
+ // locale is intentionally unused as integers formatted as binary
+ // strings are locale-independent
+ return fmt.Sprintf("%b", argInt), nil
+ case types.UintType:
+ argInt := arg.Value().(uint64)
+ return fmt.Sprintf("%b", argInt), nil
+ case types.BoolType:
+ argBool := arg.Value().(bool)
+ if argBool {
+ return "1", nil
+ }
+ return "0", nil
+ default:
+ return "", fmt.Errorf("only integers and bools can be formatted as binary, was given %s", arg.Type().TypeName())
+ }
+}
+
+func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ fmtStr := "%x"
+ if useUpper {
+ fmtStr = "%X"
+ }
+ switch arg.Type() {
+ case types.StringType, types.BytesType:
+ if arg.Type() == types.BytesType {
+ return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil
+ }
+ return fmt.Sprintf(fmtStr, arg.Value().(string)), nil
+ case types.IntType:
+ argInt, ok := arg.Value().(int64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
+ }
+ return fmt.Sprintf(fmtStr, argInt), nil
+ case types.UintType:
+ argInt, ok := arg.Value().(uint64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
+ }
+ return fmt.Sprintf(fmtStr, argInt), nil
+ default:
+ return "", fmt.Errorf("only integers, byte buffers, and strings can be formatted as hex, was given %s", arg.Type().TypeName())
+ }
+ }
+}
+
+func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt := arg.Value().(int64)
+ return fmt.Sprintf("%o", argInt), nil
+ case types.UintType:
+ argInt := arg.Value().(uint64)
+ return fmt.Sprintf("%o", argInt), nil
+ default:
+ return "", fmt.Errorf("octal clause can only be used on integers, was given %s", arg.Type().TypeName())
+ }
+}
+
+type stringArgList struct {
+ args traits.Lister
+}
+
+func (c *stringArgList) Arg(index int64) (ref.Val, error) {
+ if index >= c.args.Size().Value().(int64) {
+ return nil, fmt.Errorf("index %d out of range", index)
+ }
+ return c.args.Get(types.Int(index)), nil
+}
+
+func (c *stringArgList) ArgSize() int64 {
+ return c.args.Size().Value().(int64)
+}
+
var (
stringListType = reflect.TypeOf([]string{})
)
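
Editorial note, not part of the patch: the helpers restored above (FormatString, formatDecimal, quoteForCEL) back the string.format member function that ext.Strings() registers. A minimal usage sketch, assuming the public cel and ext packages of this vendored cel-go version; the printed output is indicative rather than verified against this exact revision:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// ext.Strings() wires up string.format, whose clause handlers are the
	// helpers shown in the diff above (%d -> formatDecimal, %s -> FormatString).
	env, err := cel.NewEnv(ext.Strings())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`"count: %d, items: %s".format([2, ["a", "b"]])`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected roughly: count: 2, items: ["a", "b"]
}
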
diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
index 220e23d47..3a5219eb5 100644
--- a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
@@ -14,6 +14,7 @@ go_library(
"decorators.go",
"dispatcher.go",
"evalstate.go",
+ "formatting.go",
"interpretable.go",
"interpreter.go",
"optimizations.go",
diff --git a/vendor/github.com/google/cel-go/interpreter/formatting.go b/vendor/github.com/google/cel-go/interpreter/formatting.go
new file mode 100644
index 000000000..e3f753374
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/formatting.go
@@ -0,0 +1,383 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type typeVerifier func(int64, ...ref.Type) (bool, error)
+
+// InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports
+// any errors at compile time.
+func InterpolateFormattedString(verifier typeVerifier) InterpretableDecorator {
+ return func(inter Interpretable) (Interpretable, error) {
+ call, ok := inter.(InterpretableCall)
+ if !ok {
+ return inter, nil
+ }
+ if call.OverloadID() != "string_format" {
+ return inter, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return nil, fmt.Errorf("wrong number of arguments to string.format (expected 2, got %d)", len(args))
+ }
+ fmtStrInter, ok := args[0].(InterpretableConst)
+ if !ok {
+ return inter, nil
+ }
+ var fmtArgsInter InterpretableConstructor
+ fmtArgsInter, ok = args[1].(InterpretableConstructor)
+ if !ok {
+ return inter, nil
+ }
+ if fmtArgsInter.Type() != types.ListType {
+ // don't necessarily return an error since the list may be DynType
+ return inter, nil
+ }
+ formatStr := fmtStrInter.Value().Value().(string)
+ initVals := fmtArgsInter.InitVals()
+
+ formatCheck := &formatCheck{
+ args: initVals,
+ verifier: verifier,
+ }
+ // use a placeholder locale, since locale doesn't affect syntax
+ _, err := ParseFormatString(formatStr, formatCheck, formatCheck, "en_US")
+ if err != nil {
+ return nil, err
+ }
+ seenArgs := formatCheck.argsRequested
+ if len(initVals) > seenArgs {
+ return nil, fmt.Errorf("too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(initVals))
+ }
+ return inter, nil
+ }
+}
+
+type formatCheck struct {
+ args []Interpretable
+ argsRequested int
+ curArgIndex int64
+ enableCheckArgTypes bool
+ verifier typeVerifier
+}
+
+func (c *formatCheck) String(arg ref.Val, locale string) (string, error) {
+ valid, err := verifyString(c.args[c.curArgIndex], c.verifier)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Decimal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("integer clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("fixed-point clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("scientific clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Binary(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.BoolType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers and bools can be formatted as binary")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.StringType, types.BytesType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers, byte buffers, and strings can be formatted as hex")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Octal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("octal clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Arg(index int64) (ref.Val, error) {
+ c.argsRequested++
+ c.curArgIndex = index
+ // return a dummy value - this is immediately passed back to us
+ // through one of the FormatCallback functions, so anything will do
+ return types.Int(0), nil
+}
+
+func (c *formatCheck) ArgSize() int64 {
+ return int64(len(c.args))
+}
+
+func verifyString(sub Interpretable, verifier typeVerifier) (bool, error) {
+ subVerified, err := verifier(sub.ID(),
+ types.ListType, types.MapType, types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType, types.NullType)
+ if err != nil {
+ return false, err
+ }
+ if !subVerified {
+ return false, nil
+ }
+ con, ok := sub.(InterpretableConstructor)
+ if ok {
+ members := con.InitVals()
+ for _, m := range members {
+ // recursively verify if we're dealing with a list/map
+ verified, err := verifyString(m, verifier)
+ if err != nil {
+ return false, err
+ }
+ if !verified {
+ return false, nil
+ }
+ }
+ }
+ return true, nil
+}
+
+// FormatStringInterpolator is an interface that allows user-defined behavior
+// for formatting clause implementations, as well as argument retrieval.
+// Each function is expected to support the appropriate types as laid out in
+// the string.format documentation, and to return an error if given an inappropriate type.
+type FormatStringInterpolator interface {
+ // String takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a string, or an error if one occurred.
+ String(ref.Val, string) (string, error)
+
+ // Decimal takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a decimal integer, or an error if one occurred.
+ Decimal(ref.Val, string) (string, error)
+
+ // Fixed takes an int pointer representing precision (or nil if none was given) and
+ // returns a function operating in a similar manner to String and Decimal, taking a
+ // ref.Val and locale and returning the appropriate string. A closure is returned
+ // so precision can be set without needing an additional function call/configuration.
+ Fixed(*int) func(ref.Val, string) (string, error)
+
+ // Scientific functions identically to Fixed, except the string returned from the closure
+ // is expected to be in scientific notation.
+ Scientific(*int) func(ref.Val, string) (string, error)
+
+ // Binary takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a binary integer, or an error if one occurred.
+ Binary(ref.Val, string) (string, error)
+
+ // Hex takes a boolean that, if true, indicates the hex string output by the returned
+ // closure should use uppercase letters for A-F.
+ Hex(bool) func(ref.Val, string) (string, error)
+
+ // Octal takes a ref.Val and a string representing the current locale identifier and
+ // returns the Val formatted in octal, or an error if one occurred.
+ Octal(ref.Val, string) (string, error)
+}
+
+// FormatList is an interface that allows user-defined list-like datatypes to be used
+// for formatting clause implementations.
+type FormatList interface {
+ // Arg returns the ref.Val at the given index, or an error if one occurred.
+ Arg(int64) (ref.Val, error)
+ // ArgSize returns the length of the argument list.
+ ArgSize() int64
+}
+
+type clauseImpl func(ref.Val, string) (string, error)
+
+// ParseFormatString formats a string according to the string.format syntax, taking the clause implementations
+// from the provided FormatCallback and the args from the given FormatList.
+func ParseFormatString(formatStr string, callback FormatStringInterpolator, list FormatList, locale string) (string, error) {
+ i := 0
+ argIndex := 0
+ var builtStr strings.Builder
+ for i < len(formatStr) {
+ if formatStr[i] == '%' {
+ if i+1 < len(formatStr) && formatStr[i+1] == '%' {
+ err := builtStr.WriteByte('%')
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += 2
+ continue
+ } else {
+ argAny, err := list.Arg(int64(argIndex))
+ if err != nil {
+ return "", err
+ }
+ if i+1 >= len(formatStr) {
+ return "", errors.New("unexpected end of string")
+ }
+ if int64(argIndex) >= list.ArgSize() {
+ return "", fmt.Errorf("index %d out of range", argIndex)
+ }
+ numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale)
+ if refErr != nil {
+ return "", refErr
+ }
+ _, err = builtStr.WriteString(val)
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += numRead
+ argIndex++
+ }
+ } else {
+ err := builtStr.WriteByte(formatStr[i])
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i++
+ }
+ }
+ return builtStr.String(), nil
+}
+
+// parseAndFormatClause parses the format clause at the start of the given string with val, and returns
+// how many characters were consumed and the substituted string form of val, or an error if one occurred.
+func parseAndFormatClause(formatStr string, val ref.Val, callback FormatStringInterpolator, list FormatList, locale string) (int, string, error) {
+ i := 1
+ read, formatter, err := parseFormattingClause(formatStr[i:], callback)
+ i += read
+ if err != nil {
+ return -1, "", fmt.Errorf("could not parse formatting clause: %s", err)
+ }
+
+ valStr, err := formatter(val, locale)
+ if err != nil {
+ return -1, "", fmt.Errorf("error during formatting: %s", err)
+ }
+ return i, valStr, nil
+}
+
+func parseFormattingClause(formatStr string, callback FormatStringInterpolator) (int, clauseImpl, error) {
+ i := 0
+ read, precision, err := parsePrecision(formatStr[i:])
+ i += read
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while parsing precision: %w", err)
+ }
+ r := rune(formatStr[i])
+ i++
+ switch r {
+ case 's':
+ return i, callback.String, nil
+ case 'd':
+ return i, callback.Decimal, nil
+ case 'f':
+ return i, callback.Fixed(precision), nil
+ case 'e':
+ return i, callback.Scientific(precision), nil
+ case 'b':
+ return i, callback.Binary, nil
+ case 'x', 'X':
+ return i, callback.Hex(unicode.IsUpper(r)), nil
+ case 'o':
+ return i, callback.Octal, nil
+ default:
+ return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r)
+ }
+}
+
+func parsePrecision(formatStr string) (int, *int, error) {
+ i := 0
+ if formatStr[i] != '.' {
+ return i, nil, nil
+ }
+ i++
+ var buffer strings.Builder
+ for {
+ if i >= len(formatStr) {
+ return -1, nil, errors.New("could not find end of precision specifier")
+ }
+ if !isASCIIDigit(rune(formatStr[i])) {
+ break
+ }
+ buffer.WriteByte(formatStr[i])
+ i++
+ }
+ precision, err := strconv.Atoi(buffer.String())
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err)
+ }
+ return i, &precision, nil
+}
+
+func isASCIIDigit(r rune) bool {
+ return r <= unicode.MaxASCII && unicode.IsDigit(r)
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go
index 0aca74d88..00fc74732 100644
--- a/vendor/github.com/google/cel-go/interpreter/interpreter.go
+++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -22,13 +22,19 @@ import (
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Interpreter generates a new Interpretable from a checked or unchecked expression.
type Interpreter interface {
// NewInterpretable creates an Interpretable from a checked expression and an
// optional list of InterpretableDecorator values.
- NewInterpretable(exprAST *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error)
+ NewInterpretable(checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error)
+
+ // NewUncheckedInterpretable returns an Interpretable from a parsed expression
+ // and an optional list of InterpretableDecorator values.
+ NewUncheckedInterpretable(expr *exprpb.Expr, decorators ...InterpretableDecorator) (Interpretable, error)
}
// EvalObserver is a functional interface that accepts an expression id and an observed value.
@@ -171,7 +177,7 @@ func NewInterpreter(dispatcher Dispatcher,
// NewInterpretable implements the Interpreter interface method.
func (i *exprInterpreter) NewInterpretable(
- checked *ast.AST,
+ checked *ast.CheckedAST,
decorators ...InterpretableDecorator) (Interpretable, error) {
p := newPlanner(
i.dispatcher,
@@ -181,5 +187,19 @@ func (i *exprInterpreter) NewInterpretable(
i.container,
checked,
decorators...)
- return p.Plan(checked.Expr())
+ return p.Plan(checked.Expr)
+}
+
+// NewUncheckedInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewUncheckedInterpretable(
+ expr *exprpb.Expr,
+ decorators ...InterpretableDecorator) (Interpretable, error) {
+ p := newUncheckedPlanner(
+ i.dispatcher,
+ i.provider,
+ i.adapter,
+ i.attrFactory,
+ i.container,
+ decorators...)
+ return p.Plan(expr)
}
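
Editorial note, not part of the patch: callers rarely construct these planners directly; the cel package selects between the checked and unchecked entry points based on whether the AST has been type-checked. A rough sketch of the two paths through the public API, assuming Program dispatches on whether the Ast is checked, as in this vendored version:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}
	// Parse-only AST: evaluation is expected to use the unchecked planner
	// (no reference/type maps available at plan time).
	parsed, iss := env.Parse("x + 1")
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Type-checked AST: evaluation is expected to use the checked planner.
	checked, iss := env.Check(parsed)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	for _, a := range []*cel.Ast{parsed, checked} {
		prg, err := env.Program(a)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(map[string]any{"x": 41})
		if err != nil {
			panic(err)
		}
		fmt.Println(out) // 42
	}
}
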
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go
index cf371f95d..757cd080e 100644
--- a/vendor/github.com/google/cel-go/interpreter/planner.go
+++ b/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -23,12 +23,15 @@ import (
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value.
type interpretablePlanner interface {
// Plan generates an Interpretable value (or error) from the input proto Expr.
- Plan(expr ast.Expr) (Interpretable, error)
+ Plan(expr *exprpb.Expr) (Interpretable, error)
}
// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
@@ -40,7 +43,28 @@ func newPlanner(disp Dispatcher,
adapter types.Adapter,
attrFactory AttributeFactory,
cont *containers.Container,
- exprAST *ast.AST,
+ checked *ast.CheckedAST,
+ decorators ...InterpretableDecorator) interpretablePlanner {
+ return &planner{
+ disp: disp,
+ provider: provider,
+ adapter: adapter,
+ attrFactory: attrFactory,
+ container: cont,
+ refMap: checked.ReferenceMap,
+ typeMap: checked.TypeMap,
+ decorators: decorators,
+ }
+}
+
+// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
+// TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in
+// Select expressions are resolved lazily at evaluation time.
+func newUncheckedPlanner(disp Dispatcher,
+ provider types.Provider,
+ adapter types.Adapter,
+ attrFactory AttributeFactory,
+ cont *containers.Container,
decorators ...InterpretableDecorator) interpretablePlanner {
return &planner{
disp: disp,
@@ -48,8 +72,8 @@ func newPlanner(disp Dispatcher,
adapter: adapter,
attrFactory: attrFactory,
container: cont,
- refMap: exprAST.ReferenceMap(),
- typeMap: exprAST.TypeMap(),
+ refMap: make(map[int64]*ast.ReferenceInfo),
+ typeMap: make(map[int64]*types.Type),
decorators: decorators,
}
}
@@ -71,24 +95,22 @@ type planner struct {
// useful for layering functionality into the evaluation that is not natively understood by CEL,
// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of
// repeated expressions.
-func (p *planner) Plan(expr ast.Expr) (Interpretable, error) {
- switch expr.Kind() {
- case ast.CallKind:
+func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
return p.decorate(p.planCall(expr))
- case ast.IdentKind:
+ case *exprpb.Expr_IdentExpr:
return p.decorate(p.planIdent(expr))
- case ast.LiteralKind:
- return p.decorate(p.planConst(expr))
- case ast.SelectKind:
+ case *exprpb.Expr_SelectExpr:
return p.decorate(p.planSelect(expr))
- case ast.ListKind:
+ case *exprpb.Expr_ListExpr:
return p.decorate(p.planCreateList(expr))
- case ast.MapKind:
- return p.decorate(p.planCreateMap(expr))
- case ast.StructKind:
+ case *exprpb.Expr_StructExpr:
return p.decorate(p.planCreateStruct(expr))
- case ast.ComprehensionKind:
+ case *exprpb.Expr_ComprehensionExpr:
return p.decorate(p.planComprehension(expr))
+ case *exprpb.Expr_ConstExpr:
+ return p.decorate(p.planConst(expr))
}
return nil, fmt.Errorf("unsupported expr: %v", expr)
}
@@ -110,16 +132,16 @@ func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) {
}
// planIdent creates an Interpretable that resolves an identifier from an Activation.
-func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) {
+func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) {
// Establish whether the identifier is in the reference map.
- if identRef, found := p.refMap[expr.ID()]; found {
- return p.planCheckedIdent(expr.ID(), identRef)
+ if identRef, found := p.refMap[expr.GetId()]; found {
+ return p.planCheckedIdent(expr.GetId(), identRef)
}
// Create the possible attribute list for the unresolved reference.
- ident := expr.AsIdent()
+ ident := expr.GetIdentExpr()
return &evalAttr{
adapter: p.adapter,
- attr: p.attrFactory.MaybeAttribute(expr.ID(), ident),
+ attr: p.attrFactory.MaybeAttribute(expr.GetId(), ident.Name),
}, nil
}
@@ -152,20 +174,20 @@ func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Inter
// a) selects a field from a map or proto.
// b) creates a field presence test for a select within a has() macro.
// c) resolves the select expression to a namespaced identifier.
-func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) {
+func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
// If the Select id appears in the reference map from the CheckedExpr proto then it is either
// a namespaced identifier or enum value.
- if identRef, found := p.refMap[expr.ID()]; found {
- return p.planCheckedIdent(expr.ID(), identRef)
+ if identRef, found := p.refMap[expr.GetId()]; found {
+ return p.planCheckedIdent(expr.GetId(), identRef)
}
- sel := expr.AsSelect()
+ sel := expr.GetSelectExpr()
// Plan the operand evaluation.
- op, err := p.Plan(sel.Operand())
+ op, err := p.Plan(sel.GetOperand())
if err != nil {
return nil, err
}
- opType := p.typeMap[sel.Operand().ID()]
+ opType := p.typeMap[sel.GetOperand().GetId()]
// If the Select was marked TestOnly, this is a presence test.
//
@@ -189,14 +211,14 @@ func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) {
}
// Build a qualifier for the attribute.
- qual, err := p.attrFactory.NewQualifier(opType, expr.ID(), sel.FieldName(), false)
+ qual, err := p.attrFactory.NewQualifier(opType, expr.GetId(), sel.GetField(), false)
if err != nil {
return nil, err
}
// Modify the attribute to be test-only.
- if sel.IsTestOnly() {
+ if sel.GetTestOnly() {
attr = &evalTestOnly{
- id: expr.ID(),
+ id: expr.GetId(),
InterpretableAttribute: attr,
}
}
@@ -208,10 +230,10 @@ func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) {
// planCall creates a callable Interpretable while specializing for common functions and invocation
// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in
// optimized Interpretable values.
-func (p *planner) planCall(expr ast.Expr) (Interpretable, error) {
- call := expr.AsCall()
+func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) {
+ call := expr.GetCallExpr()
target, fnName, oName := p.resolveFunction(expr)
- argCount := len(call.Args())
+ argCount := len(call.GetArgs())
var offset int
if target != nil {
argCount++
@@ -226,7 +248,7 @@ func (p *planner) planCall(expr ast.Expr) (Interpretable, error) {
}
args[0] = arg
}
- for i, argExpr := range call.Args() {
+ for i, argExpr := range call.GetArgs() {
arg, err := p.Plan(argExpr)
if err != nil {
return nil, err
@@ -285,7 +307,7 @@ func (p *planner) planCall(expr ast.Expr) (Interpretable, error) {
}
// planCallZero generates a zero-arity callable Interpretable.
-func (p *planner) planCallZero(expr ast.Expr,
+func (p *planner) planCallZero(expr *exprpb.Expr,
function string,
overload string,
impl *functions.Overload) (Interpretable, error) {
@@ -293,7 +315,7 @@ func (p *planner) planCallZero(expr ast.Expr,
return nil, fmt.Errorf("no such overload: %s()", function)
}
return &evalZeroArity{
- id: expr.ID(),
+ id: expr.GetId(),
function: function,
overload: overload,
impl: impl.Function,
@@ -301,7 +323,7 @@ func (p *planner) planCallZero(expr ast.Expr,
}
// planCallUnary generates a unary callable Interpretable.
-func (p *planner) planCallUnary(expr ast.Expr,
+func (p *planner) planCallUnary(expr *exprpb.Expr,
function string,
overload string,
impl *functions.Overload,
@@ -318,7 +340,7 @@ func (p *planner) planCallUnary(expr ast.Expr,
nonStrict = impl.NonStrict
}
return &evalUnary{
- id: expr.ID(),
+ id: expr.GetId(),
function: function,
overload: overload,
arg: args[0],
@@ -329,7 +351,7 @@ func (p *planner) planCallUnary(expr ast.Expr,
}
// planCallBinary generates a binary callable Interpretable.
-func (p *planner) planCallBinary(expr ast.Expr,
+func (p *planner) planCallBinary(expr *exprpb.Expr,
function string,
overload string,
impl *functions.Overload,
@@ -346,7 +368,7 @@ func (p *planner) planCallBinary(expr ast.Expr,
nonStrict = impl.NonStrict
}
return &evalBinary{
- id: expr.ID(),
+ id: expr.GetId(),
function: function,
overload: overload,
lhs: args[0],
@@ -358,7 +380,7 @@ func (p *planner) planCallBinary(expr ast.Expr,
}
// planCallVarArgs generates a variable argument callable Interpretable.
-func (p *planner) planCallVarArgs(expr ast.Expr,
+func (p *planner) planCallVarArgs(expr *exprpb.Expr,
function string,
overload string,
impl *functions.Overload,
@@ -375,7 +397,7 @@ func (p *planner) planCallVarArgs(expr ast.Expr,
nonStrict = impl.NonStrict
}
return &evalVarArgs{
- id: expr.ID(),
+ id: expr.GetId(),
function: function,
overload: overload,
args: args,
@@ -386,41 +408,41 @@ func (p *planner) planCallVarArgs(expr ast.Expr,
}
// planCallEqual generates an equals (==) Interpretable.
-func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalEq{
- id: expr.ID(),
+ id: expr.GetId(),
lhs: args[0],
rhs: args[1],
}, nil
}
// planCallNotEqual generates a not equals (!=) Interpretable.
-func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalNe{
- id: expr.ID(),
+ id: expr.GetId(),
lhs: args[0],
rhs: args[1],
}, nil
}
// planCallLogicalAnd generates a logical and (&&) Interpretable.
-func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalAnd{
- id: expr.ID(),
+ id: expr.GetId(),
terms: args,
}, nil
}
// planCallLogicalOr generates a logical or (||) Interpretable.
-func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalOr{
- id: expr.ID(),
+ id: expr.GetId(),
terms: args,
}, nil
}
// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
-func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
cond := args[0]
t := args[1]
var tAttr Attribute
@@ -442,13 +464,13 @@ func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Inte
return &evalAttr{
adapter: p.adapter,
- attr: p.attrFactory.ConditionalAttribute(expr.ID(), cond, tAttr, fAttr),
+ attr: p.attrFactory.ConditionalAttribute(expr.GetId(), cond, tAttr, fAttr),
}, nil
}
// planCallIndex either extends an attribute with the argument to the index operation, or creates
// a relative attribute based on the return of a function call or operation.
-func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) {
+func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) {
op := args[0]
ind := args[1]
opType := p.typeMap[op.ID()]
@@ -467,11 +489,11 @@ func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bo
var qual Qualifier
switch ind := ind.(type) {
case InterpretableConst:
- qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind.Value(), optional)
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind.Value(), optional)
case InterpretableAttribute:
- qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind, optional)
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind, optional)
default:
- qual, err = p.relativeAttr(expr.ID(), ind, optional)
+ qual, err = p.relativeAttr(expr.GetId(), ind, optional)
}
if err != nil {
return nil, err
@@ -483,10 +505,10 @@ func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bo
}
// planCreateList generates a list construction Interpretable.
-func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) {
- list := expr.AsList()
- optionalIndices := list.OptionalIndices()
- elements := list.Elements()
+func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
+ list := expr.GetListExpr()
+ optionalIndices := list.GetOptionalIndices()
+ elements := list.GetElements()
optionals := make([]bool, len(elements))
for _, index := range optionalIndices {
if index < 0 || index >= int32(len(elements)) {
@@ -503,7 +525,7 @@ func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) {
elems[i] = elemVal
}
return &evalList{
- id: expr.ID(),
+ id: expr.GetId(),
elems: elems,
optionals: optionals,
hasOptionals: len(optionals) != 0,
@@ -512,29 +534,31 @@ func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) {
}
// planCreateStruct generates a map or object construction Interpretable.
-func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) {
- m := expr.AsMap()
- entries := m.Entries()
+func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
+ str := expr.GetStructExpr()
+ if len(str.MessageName) != 0 {
+ return p.planCreateObj(expr)
+ }
+ entries := str.GetEntries()
optionals := make([]bool, len(entries))
keys := make([]Interpretable, len(entries))
vals := make([]Interpretable, len(entries))
- for i, e := range entries {
- entry := e.AsMapEntry()
- keyVal, err := p.Plan(entry.Key())
+ for i, entry := range entries {
+ keyVal, err := p.Plan(entry.GetMapKey())
if err != nil {
return nil, err
}
keys[i] = keyVal
- valVal, err := p.Plan(entry.Value())
+ valVal, err := p.Plan(entry.GetValue())
if err != nil {
return nil, err
}
vals[i] = valVal
- optionals[i] = entry.IsOptional()
+ optionals[i] = entry.GetOptionalEntry()
}
return &evalMap{
- id: expr.ID(),
+ id: expr.GetId(),
keys: keys,
vals: vals,
optionals: optionals,
@@ -544,28 +568,27 @@ func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) {
}
// planCreateObj generates an object construction Interpretable.
-func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) {
- obj := expr.AsStruct()
- typeName, defined := p.resolveTypeName(obj.TypeName())
+func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
+ obj := expr.GetStructExpr()
+ typeName, defined := p.resolveTypeName(obj.GetMessageName())
if !defined {
- return nil, fmt.Errorf("unknown type: %s", obj.TypeName())
- }
- objFields := obj.Fields()
- optionals := make([]bool, len(objFields))
- fields := make([]string, len(objFields))
- vals := make([]Interpretable, len(objFields))
- for i, f := range objFields {
- field := f.AsStructField()
- fields[i] = field.Name()
- val, err := p.Plan(field.Value())
+ return nil, fmt.Errorf("unknown type: %s", obj.GetMessageName())
+ }
+ entries := obj.GetEntries()
+ optionals := make([]bool, len(entries))
+ fields := make([]string, len(entries))
+ vals := make([]Interpretable, len(entries))
+ for i, entry := range entries {
+ fields[i] = entry.GetFieldKey()
+ val, err := p.Plan(entry.GetValue())
if err != nil {
return nil, err
}
vals[i] = val
- optionals[i] = field.IsOptional()
+ optionals[i] = entry.GetOptionalEntry()
}
return &evalObj{
- id: expr.ID(),
+ id: expr.GetId(),
typeName: typeName,
fields: fields,
vals: vals,
@@ -576,33 +599,33 @@ func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) {
}
// planComprehension generates an Interpretable fold operation.
-func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) {
- fold := expr.AsComprehension()
- accu, err := p.Plan(fold.AccuInit())
+func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) {
+ fold := expr.GetComprehensionExpr()
+ accu, err := p.Plan(fold.GetAccuInit())
if err != nil {
return nil, err
}
- iterRange, err := p.Plan(fold.IterRange())
+ iterRange, err := p.Plan(fold.GetIterRange())
if err != nil {
return nil, err
}
- cond, err := p.Plan(fold.LoopCondition())
+ cond, err := p.Plan(fold.GetLoopCondition())
if err != nil {
return nil, err
}
- step, err := p.Plan(fold.LoopStep())
+ step, err := p.Plan(fold.GetLoopStep())
if err != nil {
return nil, err
}
- result, err := p.Plan(fold.Result())
+ result, err := p.Plan(fold.GetResult())
if err != nil {
return nil, err
}
return &evalFold{
- id: expr.ID(),
- accuVar: fold.AccuVar(),
+ id: expr.GetId(),
+ accuVar: fold.AccuVar,
accu: accu,
- iterVar: fold.IterVar(),
+ iterVar: fold.IterVar,
iterRange: iterRange,
cond: cond,
step: step,
@@ -612,8 +635,37 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) {
}
// planConst generates a constant valued Interpretable.
-func (p *planner) planConst(expr ast.Expr) (Interpretable, error) {
- return NewConstValue(expr.ID(), expr.AsLiteral()), nil
+func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) {
+ val, err := p.constValue(expr.GetConstExpr())
+ if err != nil {
+ return nil, err
+ }
+ return NewConstValue(expr.GetId(), val), nil
+}
+
+// constValue converts a proto Constant value to a ref.Val.
+func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) {
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ return p.adapter.NativeToValue(c.GetBoolValue()), nil
+ case *exprpb.Constant_BytesValue:
+ return p.adapter.NativeToValue(c.GetBytesValue()), nil
+ case *exprpb.Constant_DoubleValue:
+ return p.adapter.NativeToValue(c.GetDoubleValue()), nil
+ case *exprpb.Constant_DurationValue:
+ return p.adapter.NativeToValue(c.GetDurationValue().AsDuration()), nil
+ case *exprpb.Constant_Int64Value:
+ return p.adapter.NativeToValue(c.GetInt64Value()), nil
+ case *exprpb.Constant_NullValue:
+ return p.adapter.NativeToValue(c.GetNullValue()), nil
+ case *exprpb.Constant_StringValue:
+ return p.adapter.NativeToValue(c.GetStringValue()), nil
+ case *exprpb.Constant_TimestampValue:
+ return p.adapter.NativeToValue(c.GetTimestampValue().AsTime()), nil
+ case *exprpb.Constant_Uint64Value:
+ return p.adapter.NativeToValue(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unknown constant type: %v", c)
}
// resolveTypeName takes a qualified string constructed at parse time, applies the proto
@@ -635,20 +687,17 @@ func (p *planner) resolveTypeName(typeName string) (string, bool) {
// - The target expression may only consist of ident and select expressions.
// - The function is declared in the environment using its fully-qualified name.
// - The fully-qualified function name matches the string serialized target value.
-func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) {
+func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, string) {
// Note: similar logic exists within the `checker/checker.go`. If making changes here
// please consider the impact on checker.go and consolidate implementations or mirror code
// as appropriate.
- call := expr.AsCall()
- var target ast.Expr = nil
- if call.IsMemberFunction() {
- target = call.Target()
- }
- fnName := call.FunctionName()
+ call := expr.GetCallExpr()
+ target := call.GetTarget()
+ fnName := call.GetFunction()
// Checked expressions always have a reference map entry, and _should_ have the fully qualified
// function name as the fnName value.
- oRef, hasOverload := p.refMap[expr.ID()]
+ oRef, hasOverload := p.refMap[expr.GetId()]
if hasOverload {
if len(oRef.OverloadIDs) == 1 {
return target, fnName, oRef.OverloadIDs[0]
@@ -722,30 +771,16 @@ func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (Interpre
// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean
// 'found' value that indicates if the conversion is successful.
-func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) {
+func (p *planner) toQualifiedName(operand *exprpb.Expr) (string, bool) {
// If the type-checker identified the expression as an attribute, then it can't
// possibly be part of a qualified name in a namespace.
- _, isAttr := p.refMap[operand.ID()]
+ _, isAttr := p.refMap[operand.GetId()]
if isAttr {
return "", false
}
// Since functions cannot be both namespaced and receiver functions, if the operand is not a
// qualified variable name, return the (possibly) qualified name given the expression.
- switch operand.Kind() {
- case ast.IdentKind:
- id := operand.AsIdent()
- return id, true
- case ast.SelectKind:
- sel := operand.AsSelect()
- // Test only expressions are not valid as qualified names.
- if sel.IsTestOnly() {
- return "", false
- }
- if qual, found := p.toQualifiedName(sel.Operand()); found {
- return qual + "." + sel.FieldName(), true
- }
- }
- return "", false
+ return containers.ToQualifiedName(operand)
}
func stripLeadingDot(name string) string {
diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go
index 410d80dc4..b8834b1cb 100644
--- a/vendor/github.com/google/cel-go/interpreter/prune.go
+++ b/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -15,18 +15,19 @@
package interpreter
import (
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
)
type astPruner struct {
- ast.ExprFactory
- expr ast.Expr
- macroCalls map[int64]ast.Expr
+ expr *exprpb.Expr
+ macroCalls map[int64]*exprpb.Expr
state EvalState
nextExprID int64
}
@@ -66,44 +67,84 @@ type astPruner struct {
// compiled and constant folded expressions, but is not willing to constant
// fold (and thus cache results of) some external calls, then they can prepare
// the overloads accordingly.
-func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *ast.AST {
+func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr {
pruneState := NewEvalState()
for _, id := range state.IDs() {
v, _ := state.Value(id)
pruneState.SetValue(id, v)
}
pruner := &astPruner{
- ExprFactory: ast.NewExprFactory(),
- expr: expr,
- macroCalls: macroCalls,
- state: pruneState,
- nextExprID: getMaxID(expr)}
+ expr: expr,
+ macroCalls: macroCalls,
+ state: pruneState,
+ nextExprID: getMaxID(expr)}
newExpr, _ := pruner.maybePrune(expr)
- newInfo := ast.NewSourceInfo(nil)
- for id, call := range pruner.macroCalls {
- newInfo.SetMacroCall(id, call)
+ return &exprpb.ParsedExpr{
+ Expr: newExpr,
+ SourceInfo: &exprpb.SourceInfo{MacroCalls: pruner.macroCalls},
+ }
+}
+
+func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr {
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ConstExpr{
+ ConstExpr: val,
+ },
}
- return ast.NewAST(newExpr, newInfo)
}
-func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
+func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) {
switch v := val.(type) {
- case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint:
+ case types.Bool:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: bool(v)}}), true
+ case types.Bytes:
p.state.SetValue(id, val)
- return p.NewLiteral(id, val), true
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: []byte(v)}}), true
+ case types.Double:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: float64(v)}}), true
case types.Duration:
p.state.SetValue(id, val)
- durationString := v.ConvertToType(types.StringType).(types.String)
- return p.NewCall(id, overloads.TypeConvertDuration, p.NewLiteral(p.nextID(), durationString)), true
- case types.Timestamp:
- timestampString := v.ConvertToType(types.StringType).(types.String)
- return p.NewCall(id, overloads.TypeConvertTimestamp, p.NewLiteral(p.nextID(), timestampString)), true
+ durationString := string(v.ConvertToType(types.StringType).(types.String))
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: overloads.TypeConvertDuration,
+ Args: []*exprpb.Expr{
+ p.createLiteral(p.nextID(),
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: durationString}}),
+ },
+ },
+ },
+ }, true
+ case types.Int:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: int64(v)}}), true
+ case types.Uint:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: uint64(v)}}), true
+ case types.String:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: string(v)}}), true
+ case types.Null:
+ p.state.SetValue(id, val)
+ return p.createLiteral(id,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: v.Value().(structpb.NullValue)}}), true
}
// Attempt to build a list literal.
if list, isList := val.(traits.Lister); isList {
sz := list.Size().(types.Int)
- elemExprs := make([]ast.Expr, sz)
+ elemExprs := make([]*exprpb.Expr, sz)
for i := types.Int(0); i < sz; i++ {
elem := list.Get(i)
if types.IsUnknownOrError(elem) {
@@ -116,13 +157,20 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
elemExprs[i] = elemExpr
}
p.state.SetValue(id, val)
- return p.NewList(id, elemExprs, []int32{}), true
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elemExprs,
+ },
+ },
+ }, true
}
// Create a map literal if possible.
if mp, isMap := val.(traits.Mapper); isMap {
it := mp.Iterator()
- entries := make([]ast.EntryExpr, mp.Size().(types.Int))
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, mp.Size().(types.Int))
i := 0
for it.HasNext() != types.False {
key := it.Next()
@@ -138,12 +186,25 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
if !ok {
return nil, false
}
- entry := p.NewMapEntry(p.nextID(), keyExpr, valExpr, false)
+ entry := &exprpb.Expr_CreateStruct_Entry{
+ Id: p.nextID(),
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: keyExpr,
+ },
+ Value: valExpr,
+ }
entries[i] = entry
i++
}
p.state.SetValue(id, val)
- return p.NewMap(id, entries), true
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ Entries: entries,
+ },
+ },
+ }, true
}
// TODO(issues/377) To construct message literals, the type provider will need to support
@@ -151,206 +212,215 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
return nil, false
}
-func (p *astPruner) maybePruneOptional(elem ast.Expr) (ast.Expr, bool) {
- elemVal, found := p.value(elem.ID())
+func (p *astPruner) maybePruneOptional(elem *exprpb.Expr) (*exprpb.Expr, bool) {
+ elemVal, found := p.value(elem.GetId())
if found && elemVal.Type() == types.OptionalType {
opt := elemVal.(*types.Optional)
if !opt.HasValue() {
return nil, true
}
- if newElem, pruned := p.maybeCreateLiteral(elem.ID(), opt.GetValue()); pruned {
+ if newElem, pruned := p.maybeCreateLiteral(elem.GetId(), opt.GetValue()); pruned {
return newElem, true
}
}
return elem, false
}
-func (p *astPruner) maybePruneIn(node ast.Expr) (ast.Expr, bool) {
+func (p *astPruner) maybePruneIn(node *exprpb.Expr) (*exprpb.Expr, bool) {
// elem in list
- call := node.AsCall()
- val, exists := p.maybeValue(call.Args()[1].ID())
+ call := node.GetCallExpr()
+ val, exists := p.maybeValue(call.GetArgs()[1].GetId())
if !exists {
return nil, false
}
if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero {
- return p.maybeCreateLiteral(node.ID(), types.False)
+ return p.maybeCreateLiteral(node.GetId(), types.False)
}
return nil, false
}
-func (p *astPruner) maybePruneLogicalNot(node ast.Expr) (ast.Expr, bool) {
- call := node.AsCall()
- arg := call.Args()[0]
- val, exists := p.maybeValue(arg.ID())
+func (p *astPruner) maybePruneLogicalNot(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ arg := call.GetArgs()[0]
+ val, exists := p.maybeValue(arg.GetId())
if !exists {
return nil, false
}
if b, ok := val.(types.Bool); ok {
- return p.maybeCreateLiteral(node.ID(), !b)
+ return p.maybeCreateLiteral(node.GetId(), !b)
}
return nil, false
}
-func (p *astPruner) maybePruneOr(node ast.Expr) (ast.Expr, bool) {
- call := node.AsCall()
+func (p *astPruner) maybePruneOr(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
// We know result is unknown, so we have at least one unknown arg
// and if one side is a known value, we know we can ignore it.
- if v, exists := p.maybeValue(call.Args()[0].ID()); exists {
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists {
if v == types.True {
- return p.maybeCreateLiteral(node.ID(), types.True)
+ return p.maybeCreateLiteral(node.GetId(), types.True)
}
- return call.Args()[1], true
+ return call.GetArgs()[1], true
}
- if v, exists := p.maybeValue(call.Args()[1].ID()); exists {
+ if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists {
if v == types.True {
- return p.maybeCreateLiteral(node.ID(), types.True)
+ return p.maybeCreateLiteral(node.GetId(), types.True)
}
- return call.Args()[0], true
+ return call.GetArgs()[0], true
}
return nil, false
}
-func (p *astPruner) maybePruneAnd(node ast.Expr) (ast.Expr, bool) {
- call := node.AsCall()
+func (p *astPruner) maybePruneAnd(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
// We know result is unknown, so we have at least one unknown arg
// and if one side is a known value, we know we can ignore it.
- if v, exists := p.maybeValue(call.Args()[0].ID()); exists {
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists {
if v == types.False {
- return p.maybeCreateLiteral(node.ID(), types.False)
+ return p.maybeCreateLiteral(node.GetId(), types.False)
}
- return call.Args()[1], true
+ return call.GetArgs()[1], true
}
- if v, exists := p.maybeValue(call.Args()[1].ID()); exists {
+ if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists {
if v == types.False {
- return p.maybeCreateLiteral(node.ID(), types.False)
+ return p.maybeCreateLiteral(node.GetId(), types.False)
}
- return call.Args()[0], true
+ return call.GetArgs()[0], true
}
return nil, false
}
-func (p *astPruner) maybePruneConditional(node ast.Expr) (ast.Expr, bool) {
- call := node.AsCall()
- cond, exists := p.maybeValue(call.Args()[0].ID())
+func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ cond, exists := p.maybeValue(call.GetArgs()[0].GetId())
if !exists {
return nil, false
}
if cond.Value().(bool) {
- return call.Args()[1], true
+ return call.GetArgs()[1], true
}
- return call.Args()[2], true
+ return call.GetArgs()[2], true
}
-func (p *astPruner) maybePruneFunction(node ast.Expr) (ast.Expr, bool) {
- if _, exists := p.value(node.ID()); !exists {
+func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if _, exists := p.value(node.GetId()); !exists {
return nil, false
}
- call := node.AsCall()
- if call.FunctionName() == operators.LogicalOr {
+ call := node.GetCallExpr()
+ if call.Function == operators.LogicalOr {
return p.maybePruneOr(node)
}
- if call.FunctionName() == operators.LogicalAnd {
+ if call.Function == operators.LogicalAnd {
return p.maybePruneAnd(node)
}
- if call.FunctionName() == operators.Conditional {
+ if call.Function == operators.Conditional {
return p.maybePruneConditional(node)
}
- if call.FunctionName() == operators.In {
+ if call.Function == operators.In {
return p.maybePruneIn(node)
}
- if call.FunctionName() == operators.LogicalNot {
+ if call.Function == operators.LogicalNot {
return p.maybePruneLogicalNot(node)
}
return nil, false
}
-func (p *astPruner) maybePrune(node ast.Expr) (ast.Expr, bool) {
+func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) {
return p.prune(node)
}
-func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) {
+func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
if node == nil {
return node, false
}
- val, valueExists := p.maybeValue(node.ID())
+ val, valueExists := p.maybeValue(node.GetId())
if valueExists {
- if newNode, ok := p.maybeCreateLiteral(node.ID(), val); ok {
- delete(p.macroCalls, node.ID())
+ if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok {
+ delete(p.macroCalls, node.GetId())
return newNode, true
}
}
- if macro, found := p.macroCalls[node.ID()]; found {
+ if macro, found := p.macroCalls[node.GetId()]; found {
// Ensure that intermediate values for the comprehension are cleared during pruning
- if node.Kind() == ast.ComprehensionKind {
- compre := node.AsComprehension()
- visit(macro, clearIterVarVisitor(compre.IterVar(), p.state))
+ compre := node.GetComprehensionExpr()
+ if compre != nil {
+ visit(macro, clearIterVarVisitor(compre.IterVar, p.state))
}
// prune the expression in terms of the macro call instead of the expanded form.
if newMacro, pruned := p.prune(macro); pruned {
- p.macroCalls[node.ID()] = newMacro
+ p.macroCalls[node.GetId()] = newMacro
}
}
// We have either an unknown/error value, or something we don't want to
// transform, or the expression was not evaluated. If possible, drill down
// more.
- switch node.Kind() {
- case ast.SelectKind:
- sel := node.AsSelect()
- if operand, isPruned := p.maybePrune(sel.Operand()); isPruned {
- if sel.IsTestOnly() {
- return p.NewPresenceTest(node.ID(), operand, sel.FieldName()), true
- }
- return p.NewSelect(node.ID(), operand, sel.FieldName()), true
+ switch node.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{
+ Operand: operand,
+ Field: node.GetSelectExpr().GetField(),
+ TestOnly: node.GetSelectExpr().GetTestOnly(),
+ },
+ },
+ }, true
}
- case ast.CallKind:
- argsPruned := false
- call := node.AsCall()
- args := call.Args()
- newArgs := make([]ast.Expr, len(args))
- for i, a := range args {
- newArgs[i] = a
- if arg, isPruned := p.maybePrune(a); isPruned {
- argsPruned = true
- newArgs[i] = arg
- }
+ case *exprpb.Expr_CallExpr:
+ var prunedCall bool
+ call := node.GetCallExpr()
+ args := call.GetArgs()
+ newArgs := make([]*exprpb.Expr, len(args))
+ newCall := &exprpb.Expr_Call{
+ Function: call.GetFunction(),
+ Target: call.GetTarget(),
+ Args: newArgs,
}
- if !call.IsMemberFunction() {
- newCall := p.NewCall(node.ID(), call.FunctionName(), newArgs...)
- if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
- return prunedCall, true
+ for i, arg := range args {
+ newArgs[i] = arg
+ if newArg, prunedArg := p.maybePrune(arg); prunedArg {
+ prunedCall = true
+ newArgs[i] = newArg
}
- return newCall, argsPruned
}
- newTarget := call.Target()
- targetPruned := false
- if prunedTarget, isPruned := p.maybePrune(call.Target()); isPruned {
- targetPruned = true
- newTarget = prunedTarget
+ if newTarget, prunedTarget := p.maybePrune(call.GetTarget()); prunedTarget {
+ prunedCall = true
+ newCall.Target = newTarget
+ }
+ newNode := &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: newCall,
+ },
}
- newCall := p.NewMemberCall(node.ID(), call.FunctionName(), newTarget, newArgs...)
- if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
- return prunedCall, true
+ if newExpr, pruned := p.maybePruneFunction(newNode); pruned {
+ newExpr, _ = p.maybePrune(newExpr)
+ return newExpr, true
}
- return newCall, targetPruned || argsPruned
- case ast.ListKind:
- l := node.AsList()
- elems := l.Elements()
- optIndices := l.OptionalIndices()
+ if prunedCall {
+ return newNode, true
+ }
+ case *exprpb.Expr_ListExpr:
+ elems := node.GetListExpr().GetElements()
+ optIndices := node.GetListExpr().GetOptionalIndices()
optIndexMap := map[int32]bool{}
for _, i := range optIndices {
optIndexMap[i] = true
}
newOptIndexMap := make(map[int32]bool, len(optIndexMap))
- newElems := make([]ast.Expr, 0, len(elems))
- var listPruned bool
+ newElems := make([]*exprpb.Expr, 0, len(elems))
+ var prunedList bool
+
prunedIdx := 0
for i, elem := range elems {
_, isOpt := optIndexMap[int32(i)]
if isOpt {
newElem, pruned := p.maybePruneOptional(elem)
if pruned {
- listPruned = true
+ prunedList = true
if newElem != nil {
newElems = append(newElems, newElem)
prunedIdx++
@@ -361,7 +431,7 @@ func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) {
}
if newElem, prunedElem := p.maybePrune(elem); prunedElem {
newElems = append(newElems, newElem)
- listPruned = true
+ prunedList = true
} else {
newElems = append(newElems, elem)
}
@@ -373,64 +443,76 @@ func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) {
optIndices[idx] = i
idx++
}
- if listPruned {
- return p.NewList(node.ID(), newElems, optIndices), true
+ if prunedList {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: newElems,
+ OptionalIndices: optIndices,
+ },
+ },
+ }, true
}
- case ast.MapKind:
- var mapPruned bool
- m := node.AsMap()
- entries := m.Entries()
- newEntries := make([]ast.EntryExpr, len(entries))
+ case *exprpb.Expr_StructExpr:
+ var prunedStruct bool
+ entries := node.GetStructExpr().GetEntries()
+ messageType := node.GetStructExpr().GetMessageName()
+ newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
for i, entry := range entries {
newEntries[i] = entry
- e := entry.AsMapEntry()
- newKey, keyPruned := p.maybePrune(e.Key())
- newValue, valuePruned := p.maybePrune(e.Value())
- if !keyPruned && !valuePruned {
+ newKey, prunedKey := p.maybePrune(entry.GetMapKey())
+ newValue, prunedValue := p.maybePrune(entry.GetValue())
+ if !prunedKey && !prunedValue {
continue
}
- mapPruned = true
- newEntry := p.NewMapEntry(entry.ID(), newKey, newValue, e.IsOptional())
- newEntries[i] = newEntry
- }
- if mapPruned {
- return p.NewMap(node.ID(), newEntries), true
- }
- case ast.StructKind:
- var structPruned bool
- obj := node.AsStruct()
- fields := obj.Fields()
- newFields := make([]ast.EntryExpr, len(fields))
- for i, field := range fields {
- newFields[i] = field
- f := field.AsStructField()
- newValue, prunedValue := p.maybePrune(f.Value())
- if !prunedValue {
- continue
+ prunedStruct = true
+ newEntry := &exprpb.Expr_CreateStruct_Entry{
+ Value: newValue,
}
- structPruned = true
- newEntry := p.NewStructField(field.ID(), f.Name(), newValue, f.IsOptional())
- newFields[i] = newEntry
+ if messageType != "" {
+ newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: entry.GetFieldKey(),
+ }
+ } else {
+ newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: newKey,
+ }
+ }
+ newEntry.OptionalEntry = entry.GetOptionalEntry()
+ newEntries[i] = newEntry
}
- if structPruned {
- return p.NewStruct(node.ID(), obj.TypeName(), newFields), true
+ if prunedStruct {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: messageType,
+ Entries: newEntries,
+ },
+ },
+ }, true
}
- case ast.ComprehensionKind:
- compre := node.AsComprehension()
+ case *exprpb.Expr_ComprehensionExpr:
+ compre := node.GetComprehensionExpr()
// Only the range of the comprehension is pruned since the state tracking only records
// the last iteration of the comprehension and not each step in the evaluation, which
// means that any residuals computed in between might be inaccurate.
- if newRange, pruned := p.maybePrune(compre.IterRange()); pruned {
- return p.NewComprehension(
- node.ID(),
- newRange,
- compre.IterVar(),
- compre.AccuVar(),
- compre.AccuInit(),
- compre.LoopCondition(),
- compre.LoopStep(),
- compre.Result(),
- ), true
+ if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned {
+ return &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterVar: compre.GetIterVar(),
+ IterRange: newRange,
+ AccuVar: compre.GetAccuVar(),
+ AccuInit: compre.GetAccuInit(),
+ LoopCondition: compre.GetLoopCondition(),
+ LoopStep: compre.GetLoopStep(),
+ Result: compre.GetResult(),
+ },
+ },
+ }, true
}
}
return node, false
@@ -457,12 +539,12 @@ func (p *astPruner) nextID() int64 {
type astVisitor struct {
// visitExpr is called on every expr node, including those within a map/struct entry.
- visitExpr func(expr ast.Expr)
+ visitExpr func(expr *exprpb.Expr)
// visitEntry is called before entering the key, value of a map/struct entry.
- visitEntry func(entry ast.EntryExpr)
+ visitEntry func(entry *exprpb.Expr_CreateStruct_Entry)
}
-func getMaxID(expr ast.Expr) int64 {
+func getMaxID(expr *exprpb.Expr) int64 {
maxID := int64(1)
visit(expr, maxIDVisitor(&maxID))
return maxID
@@ -470,9 +552,10 @@ func getMaxID(expr ast.Expr) int64 {
func clearIterVarVisitor(varName string, state EvalState) astVisitor {
return astVisitor{
- visitExpr: func(e ast.Expr) {
- if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
- state.SetValue(e.ID(), nil)
+ visitExpr: func(e *exprpb.Expr) {
+ ident := e.GetIdentExpr()
+ if ident != nil && ident.GetName() == varName {
+ state.SetValue(e.GetId(), nil)
}
},
}
@@ -480,63 +563,56 @@ func clearIterVarVisitor(varName string, state EvalState) astVisitor {
func maxIDVisitor(maxID *int64) astVisitor {
return astVisitor{
- visitExpr: func(e ast.Expr) {
- if e.ID() >= *maxID {
- *maxID = e.ID() + 1
+ visitExpr: func(e *exprpb.Expr) {
+ if e.GetId() >= *maxID {
+ *maxID = e.GetId() + 1
}
},
- visitEntry: func(e ast.EntryExpr) {
- if e.ID() >= *maxID {
- *maxID = e.ID() + 1
+ visitEntry: func(e *exprpb.Expr_CreateStruct_Entry) {
+ if e.GetId() >= *maxID {
+ *maxID = e.GetId() + 1
}
},
}
}
-func visit(expr ast.Expr, visitor astVisitor) {
- exprs := []ast.Expr{expr}
+func visit(expr *exprpb.Expr, visitor astVisitor) {
+ exprs := []*exprpb.Expr{expr}
for len(exprs) != 0 {
e := exprs[0]
if visitor.visitExpr != nil {
visitor.visitExpr(e)
}
exprs = exprs[1:]
- switch e.Kind() {
- case ast.SelectKind:
- exprs = append(exprs, e.AsSelect().Operand())
- case ast.CallKind:
- call := e.AsCall()
- if call.Target() != nil {
- exprs = append(exprs, call.Target())
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ exprs = append(exprs, e.GetSelectExpr().GetOperand())
+ case *exprpb.Expr_CallExpr:
+ call := e.GetCallExpr()
+ if call.GetTarget() != nil {
+ exprs = append(exprs, call.GetTarget())
}
- exprs = append(exprs, call.Args()...)
- case ast.ComprehensionKind:
- compre := e.AsComprehension()
+ exprs = append(exprs, call.GetArgs()...)
+ case *exprpb.Expr_ComprehensionExpr:
+ compre := e.GetComprehensionExpr()
exprs = append(exprs,
- compre.IterRange(),
- compre.AccuInit(),
- compre.LoopCondition(),
- compre.LoopStep(),
- compre.Result())
- case ast.ListKind:
- list := e.AsList()
- exprs = append(exprs, list.Elements()...)
- case ast.MapKind:
- for _, entry := range e.AsMap().Entries() {
- e := entry.AsMapEntry()
+ compre.GetIterRange(),
+ compre.GetAccuInit(),
+ compre.GetLoopCondition(),
+ compre.GetLoopStep(),
+ compre.GetResult())
+ case *exprpb.Expr_ListExpr:
+ list := e.GetListExpr()
+ exprs = append(exprs, list.GetElements()...)
+ case *exprpb.Expr_StructExpr:
+ for _, entry := range e.GetStructExpr().GetEntries() {
if visitor.visitEntry != nil {
visitor.visitEntry(entry)
}
- exprs = append(exprs, e.Key())
- exprs = append(exprs, e.Value())
- }
- case ast.StructKind:
- for _, entry := range e.AsStruct().Fields() {
- f := entry.AsStructField()
- if visitor.visitEntry != nil {
- visitor.visitEntry(entry)
+ if entry.GetMapKey() != nil {
+ exprs = append(exprs, entry.GetMapKey())
}
- exprs = append(exprs, f.Value())
+ exprs = append(exprs, entry.GetValue())
}
}
}
diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel
index 97bc9bd43..67ecc9554 100644
--- a/vendor/github.com/google/cel-go/parser/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/parser/BUILD.bazel
@@ -20,13 +20,10 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//common:go_default_library",
- "//common/ast:go_default_library",
"//common/operators:go_default_library",
"//common/runes:go_default_library",
- "//common/types:go_default_library",
- "//common/types/ref:go_default_library",
"//parser/gen:go_default_library",
- "@com_github_antlr4_go_antlr_v4//:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
@@ -46,12 +43,10 @@ go_test(
":go_default_library",
],
deps = [
- "//common/ast:go_default_library",
"//common/debug:go_default_library",
- "//common/types:go_default_library",
"//parser/gen:go_default_library",
"//test:go_default_library",
- "@com_github_antlr4_go_antlr_v4//:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//testing/protocmp:go_default_library",
],
diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
index e70433483..654d1de7a 100644
--- a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
@@ -21,6 +21,6 @@ go_library(
],
importpath = "github.com/google/cel-go/parser/gen",
deps = [
- "@com_github_antlr4_go_antlr_v4//:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
index c49d03867..0247f470a 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
@@ -1,7 +1,7 @@
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr4-go/antlr/v4"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// BaseCELListener is a complete listener for a parse tree produced by CELParser.
type BaseCELListener struct{}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
index b2c0783d3..52a7f4dc5 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -1,8 +1,7 @@
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr4-go/antlr/v4"
-
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
type BaseCELVisitor struct {
*antlr.BaseParseTreeVisitor
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
index e026cc46f..98ddc06d0 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -1,278 +1,280 @@
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen
+
import (
"fmt"
- "sync"
+ "sync"
"unicode"
- "github.com/antlr4-go/antlr/v4"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)
+
// Suppress unused import error
var _ = fmt.Printf
var _ = sync.Once{}
var _ = unicode.IsLetter
-
type CELLexer struct {
*antlr.BaseLexer
channelNames []string
- modeNames []string
+ modeNames []string
// TODO: EOF string
}
-var CELLexerLexerStaticData struct {
- once sync.Once
- serializedATN []int32
- ChannelNames []string
- ModeNames []string
- LiteralNames []string
- SymbolicNames []string
- RuleNames []string
- PredictionContextCache *antlr.PredictionContextCache
- atn *antlr.ATN
- decisionToDFA []*antlr.DFA
+var cellexerLexerStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ channelNames []string
+ modeNames []string
+ literalNames []string
+ symbolicNames []string
+ ruleNames []string
+ predictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
}
func cellexerLexerInit() {
- staticData := &CELLexerLexerStaticData
- staticData.ChannelNames = []string{
- "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
- }
- staticData.ModeNames = []string{
- "DEFAULT_MODE",
- }
- staticData.LiteralNames = []string{
- "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
- "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
- "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
- }
- staticData.SymbolicNames = []string{
- "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
- "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
- "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
- "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
- "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
- "STRING", "BYTES", "IDENTIFIER",
- }
- staticData.RuleNames = []string{
- "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
- "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
- "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
- "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
- "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW",
- "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ",
- "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING",
- "BYTES", "IDENTIFIER",
- }
- staticData.PredictionContextCache = antlr.NewPredictionContextCache()
- staticData.serializedATN = []int32{
- 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
- 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
- 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
- 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
- 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
- 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
- 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
- 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7,
- 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46,
- 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4,
- 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8,
- 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13,
- 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1,
- 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24,
- 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1,
- 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29,
- 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31,
- 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1,
- 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36,
- 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1,
- 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38,
- 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39,
- 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40,
- 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11,
- 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253,
- 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261,
- 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1,
- 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11,
- 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42,
- 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43,
- 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43,
- 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44,
- 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5,
- 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44,
- 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1,
- 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349,
- 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
- 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44,
- 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1,
- 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44,
- 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
- 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44,
- 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1,
- 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46,
- 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7,
- 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27,
- 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45,
- 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0,
- 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31,
- 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122,
- 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97,
- 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96,
- 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120,
- 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117,
- 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92,
- 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39,
- 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5,
- 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13,
- 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0,
- 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0,
- 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0,
- 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0,
- 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1,
- 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81,
- 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0,
- 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0,
- 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0,
- 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17,
- 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1,
- 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0,
- 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138,
- 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0,
- 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152,
- 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0,
- 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183,
- 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0,
- 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227,
- 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0,
- 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413,
- 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0,
- 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102,
- 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5,
- 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0,
- 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111,
- 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5,
- 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124,
- 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0,
- 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124,
- 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26,
- 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41,
- 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0,
- 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137,
- 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5,
- 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0,
- 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147,
- 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5,
- 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114,
- 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0,
- 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0,
- 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0,
- 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0,
- 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169,
- 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2,
- 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0,
- 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178,
- 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179,
- 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3,
- 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187,
- 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190,
- 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189,
- 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57,
- 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28,
- 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55,
- 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203,
- 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207,
- 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210,
- 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225,
- 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3,
- 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3,
- 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3,
- 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0,
- 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0,
- 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229,
- 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80,
- 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1,
- 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0,
- 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241,
- 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246,
- 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1,
- 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0,
- 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
- 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255,
- 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275,
- 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1,
- 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0,
- 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0,
- 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269,
- 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273,
- 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1,
- 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0,
- 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278,
- 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290,
- 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1,
- 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0,
- 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0,
- 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293,
- 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1,
- 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0,
- 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0,
- 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303,
- 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306,
- 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0,
- 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0,
- 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313,
- 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316,
- 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34,
- 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0,
- 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324,
- 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324,
- 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5,
- 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69,
- 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0,
- 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337,
- 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341,
- 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5,
- 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69,
- 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0,
- 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351,
- 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355,
- 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5,
- 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0,
- 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0,
- 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366,
- 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368,
- 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0,
- 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0,
- 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378,
- 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383,
- 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0,
- 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0,
- 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390,
- 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394,
- 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9,
- 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0,
- 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402,
- 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407,
- 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0,
- 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0,
- 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409,
- 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3,
- 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0,
- 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30,
- 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418,
- 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421,
- 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181,
- 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294,
- 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406,
- 413, 418, 420, 1, 0, 1, 0,
-}
- deserializer := antlr.NewATNDeserializer(nil)
- staticData.atn = deserializer.Deserialize(staticData.serializedATN)
- atn := staticData.atn
- staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
- decisionToDFA := staticData.decisionToDFA
- for index, state := range atn.DecisionToState {
- decisionToDFA[index] = antlr.NewDFA(state, index)
- }
+ staticData := &cellexerLexerStaticData
+ staticData.channelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+ }
+ staticData.modeNames = []string{
+ "DEFAULT_MODE",
+ }
+ staticData.literalNames = []string{
+ "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
+ "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+ }
+ staticData.symbolicNames = []string{
+ "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
+ "STRING", "BYTES", "IDENTIFIER",
+ }
+ staticData.ruleNames = []string{
+ "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW",
+ "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ",
+ "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING",
+ "BYTES", "IDENTIFIER",
+ }
+ staticData.predictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
+ 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
+ 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
+ 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
+ 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
+ 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
+ 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
+ 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7,
+ 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4,
+ 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8,
+ 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13,
+ 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1,
+ 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24,
+ 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1,
+ 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29,
+ 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31,
+ 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1,
+ 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36,
+ 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1,
+ 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38,
+ 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39,
+ 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40,
+ 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11,
+ 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253,
+ 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261,
+ 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1,
+ 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11,
+ 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42,
+ 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43,
+ 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43,
+ 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44,
+ 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5,
+ 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44,
+ 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1,
+ 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349,
+ 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
+ 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44,
+ 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1,
+ 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44,
+ 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
+ 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44,
+ 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1,
+ 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46,
+ 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7,
+ 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27,
+ 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45,
+ 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0,
+ 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31,
+ 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122,
+ 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97,
+ 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96,
+ 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120,
+ 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117,
+ 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92,
+ 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39,
+ 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5,
+ 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13,
+ 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0,
+ 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0,
+ 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0,
+ 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0,
+ 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1,
+ 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81,
+ 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0,
+ 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0,
+ 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0,
+ 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17,
+ 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1,
+ 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0,
+ 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138,
+ 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0,
+ 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152,
+ 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0,
+ 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183,
+ 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0,
+ 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227,
+ 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0,
+ 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413,
+ 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0,
+ 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102,
+ 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5,
+ 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0,
+ 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111,
+ 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5,
+ 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124,
+ 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0,
+ 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124,
+ 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26,
+ 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41,
+ 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0,
+ 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137,
+ 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5,
+ 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0,
+ 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147,
+ 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5,
+ 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114,
+ 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0,
+ 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0,
+ 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0,
+ 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0,
+ 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169,
+ 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2,
+ 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0,
+ 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178,
+ 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179,
+ 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3,
+ 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187,
+ 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190,
+ 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189,
+ 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57,
+ 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28,
+ 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55,
+ 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203,
+ 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207,
+ 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210,
+ 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225,
+ 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3,
+ 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3,
+ 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3,
+ 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0,
+ 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0,
+ 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229,
+ 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80,
+ 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1,
+ 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0,
+ 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241,
+ 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246,
+ 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1,
+ 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0,
+ 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
+ 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255,
+ 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275,
+ 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1,
+ 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0,
+ 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0,
+ 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269,
+ 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273,
+ 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1,
+ 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0,
+ 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278,
+ 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290,
+ 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1,
+ 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0,
+ 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0,
+ 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293,
+ 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1,
+ 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0,
+ 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0,
+ 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303,
+ 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306,
+ 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0,
+ 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0,
+ 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313,
+ 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316,
+ 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34,
+ 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0,
+ 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324,
+ 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324,
+ 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5,
+ 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69,
+ 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0,
+ 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337,
+ 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341,
+ 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5,
+ 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69,
+ 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0,
+ 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351,
+ 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355,
+ 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5,
+ 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0,
+ 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0,
+ 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366,
+ 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368,
+ 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0,
+ 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0,
+ 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378,
+ 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383,
+ 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0,
+ 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0,
+ 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390,
+ 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394,
+ 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9,
+ 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0,
+ 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402,
+ 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407,
+ 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0,
+ 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0,
+ 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409,
+ 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3,
+ 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0,
+ 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30,
+ 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418,
+ 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421,
+ 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181,
+ 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294,
+ 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406,
+ 413, 418, 420, 1, 0, 1, 0,
+ }
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
}
// CELLexerInit initializes any static state used to implement CELLexer. By default the
@@ -280,22 +282,22 @@ func cellexerLexerInit() {
// NewCELLexer(). You can call this function if you wish to initialize the static state ahead
// of time.
func CELLexerInit() {
- staticData := &CELLexerLexerStaticData
- staticData.once.Do(cellexerLexerInit)
+ staticData := &cellexerLexerStaticData
+ staticData.once.Do(cellexerLexerInit)
}
// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream.
func NewCELLexer(input antlr.CharStream) *CELLexer {
- CELLexerInit()
+ CELLexerInit()
l := new(CELLexer)
l.BaseLexer = antlr.NewBaseLexer(input)
- staticData := &CELLexerLexerStaticData
- l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
- l.channelNames = staticData.ChannelNames
- l.modeNames = staticData.ModeNames
- l.RuleNames = staticData.RuleNames
- l.LiteralNames = staticData.LiteralNames
- l.SymbolicNames = staticData.SymbolicNames
+ staticData := &cellexerLexerStaticData
+ l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
+ l.channelNames = staticData.channelNames
+ l.modeNames = staticData.modeNames
+ l.RuleNames = staticData.ruleNames
+ l.LiteralNames = staticData.literalNames
+ l.SymbolicNames = staticData.symbolicNames
l.GrammarFileName = "CEL.g4"
// TODO: l.EOF = antlr.TokenEOF
@@ -304,41 +306,40 @@ func NewCELLexer(input antlr.CharStream) *CELLexer {
// CELLexer tokens.
const (
- CELLexerEQUALS = 1
- CELLexerNOT_EQUALS = 2
- CELLexerIN = 3
- CELLexerLESS = 4
- CELLexerLESS_EQUALS = 5
+ CELLexerEQUALS = 1
+ CELLexerNOT_EQUALS = 2
+ CELLexerIN = 3
+ CELLexerLESS = 4
+ CELLexerLESS_EQUALS = 5
CELLexerGREATER_EQUALS = 6
- CELLexerGREATER = 7
- CELLexerLOGICAL_AND = 8
- CELLexerLOGICAL_OR = 9
- CELLexerLBRACKET = 10
- CELLexerRPRACKET = 11
- CELLexerLBRACE = 12
- CELLexerRBRACE = 13
- CELLexerLPAREN = 14
- CELLexerRPAREN = 15
- CELLexerDOT = 16
- CELLexerCOMMA = 17
- CELLexerMINUS = 18
- CELLexerEXCLAM = 19
- CELLexerQUESTIONMARK = 20
- CELLexerCOLON = 21
- CELLexerPLUS = 22
- CELLexerSTAR = 23
- CELLexerSLASH = 24
- CELLexerPERCENT = 25
- CELLexerCEL_TRUE = 26
- CELLexerCEL_FALSE = 27
- CELLexerNUL = 28
- CELLexerWHITESPACE = 29
- CELLexerCOMMENT = 30
- CELLexerNUM_FLOAT = 31
- CELLexerNUM_INT = 32
- CELLexerNUM_UINT = 33
- CELLexerSTRING = 34
- CELLexerBYTES = 35
- CELLexerIDENTIFIER = 36
+ CELLexerGREATER = 7
+ CELLexerLOGICAL_AND = 8
+ CELLexerLOGICAL_OR = 9
+ CELLexerLBRACKET = 10
+ CELLexerRPRACKET = 11
+ CELLexerLBRACE = 12
+ CELLexerRBRACE = 13
+ CELLexerLPAREN = 14
+ CELLexerRPAREN = 15
+ CELLexerDOT = 16
+ CELLexerCOMMA = 17
+ CELLexerMINUS = 18
+ CELLexerEXCLAM = 19
+ CELLexerQUESTIONMARK = 20
+ CELLexerCOLON = 21
+ CELLexerPLUS = 22
+ CELLexerSTAR = 23
+ CELLexerSLASH = 24
+ CELLexerPERCENT = 25
+ CELLexerCEL_TRUE = 26
+ CELLexerCEL_FALSE = 27
+ CELLexerNUL = 28
+ CELLexerWHITESPACE = 29
+ CELLexerCOMMENT = 30
+ CELLexerNUM_FLOAT = 31
+ CELLexerNUM_INT = 32
+ CELLexerNUM_UINT = 33
+ CELLexerSTRING = 34
+ CELLexerBYTES = 35
+ CELLexerIDENTIFIER = 36
)
-
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
index 22dc99789..73b7f1d39 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -1,8 +1,7 @@
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr4-go/antlr/v4"
-
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// CELListener is a complete listener for a parse tree produced by CELParser.
type CELListener interface {
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
index 35334af61..0cb6c8eae 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -1,12 +1,12 @@
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
import (
"fmt"
"strconv"
- "sync"
+ "sync"
- "github.com/antlr4-go/antlr/v4"
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)
// Suppress unused import errors
@@ -14,167 +14,166 @@ var _ = fmt.Printf
var _ = strconv.Itoa
var _ = sync.Once{}
-
type CELParser struct {
*antlr.BaseParser
}
-var CELParserStaticData struct {
- once sync.Once
- serializedATN []int32
- LiteralNames []string
- SymbolicNames []string
- RuleNames []string
- PredictionContextCache *antlr.PredictionContextCache
- atn *antlr.ATN
- decisionToDFA []*antlr.DFA
+var celParserStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ literalNames []string
+ symbolicNames []string
+ ruleNames []string
+ predictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
}
func celParserInit() {
- staticData := &CELParserStaticData
- staticData.LiteralNames = []string{
- "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
- "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
- "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
- }
- staticData.SymbolicNames = []string{
- "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
- "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
- "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
- "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
- "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
- "STRING", "BYTES", "IDENTIFIER",
- }
- staticData.RuleNames = []string{
- "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
- "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList",
- "optField", "mapInitializerList", "optExpr", "literal",
- }
- staticData.PredictionContextCache = antlr.NewPredictionContextCache()
- staticData.serializedATN = []int32{
- 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
- 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
- 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
- 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1,
- 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3,
- 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1,
- 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5,
- 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1,
- 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6,
- 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3,
- 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7,
- 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10,
- 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136,
- 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8,
- 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1,
- 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8,
- 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8,
- 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186,
- 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10,
- 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1,
- 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12,
- 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
- 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14,
- 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15,
- 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249,
- 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
- 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22,
- 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1,
- 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14,
- 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0,
- 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28,
- 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0,
- 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38,
- 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0,
- 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6,
- 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50,
- 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0,
- 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3,
- 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56,
- 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1,
- 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64,
- 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0,
- 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0,
- 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73,
- 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1,
- 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79,
- 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0,
- 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87,
- 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0,
- 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5,
- 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94,
- 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0,
- 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100,
- 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10,
- 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0,
- 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0,
- 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111,
- 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114,
- 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10,
- 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0,
- 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0,
- 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124,
- 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124,
- 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0,
- 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0,
- 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134,
- 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137,
- 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0,
- 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0,
- 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145,
- 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149,
- 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0,
- 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0,
- 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0,
- 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157,
- 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162,
- 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0,
- 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0,
- 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168,
- 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173,
- 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1,
- 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0,
- 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179,
- 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144,
- 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0,
- 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0,
- 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187,
- 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1,
- 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28,
- 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0,
- 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199,
- 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5,
- 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2,
- 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0,
- 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208,
- 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0,
- 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0,
- 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219,
- 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223,
- 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0,
- 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0,
- 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230,
- 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1,
- 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0,
- 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0,
- 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241,
- 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249,
- 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5,
- 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0,
- 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248,
- 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48,
- 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146,
- 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235,
- 240, 248,
-}
- deserializer := antlr.NewATNDeserializer(nil)
- staticData.atn = deserializer.Deserialize(staticData.serializedATN)
- atn := staticData.atn
- staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
- decisionToDFA := staticData.decisionToDFA
- for index, state := range atn.DecisionToState {
- decisionToDFA[index] = antlr.NewDFA(state, index)
- }
+ staticData := &celParserStaticData
+ staticData.literalNames = []string{
+ "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
+ "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+ }
+ staticData.symbolicNames = []string{
+ "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
+ "STRING", "BYTES", "IDENTIFIER",
+ }
+ staticData.ruleNames = []string{
+ "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
+ "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList",
+ "optField", "mapInitializerList", "optExpr", "literal",
+ }
+ staticData.predictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+ 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
+ 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1,
+ 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3,
+ 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1,
+ 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5,
+ 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1,
+ 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6,
+ 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3,
+ 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7,
+ 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10,
+ 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136,
+ 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8,
+ 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1,
+ 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8,
+ 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8,
+ 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186,
+ 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10,
+ 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1,
+ 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12,
+ 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
+ 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14,
+ 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15,
+ 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249,
+ 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22,
+ 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1,
+ 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14,
+ 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0,
+ 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28,
+ 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0,
+ 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38,
+ 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0,
+ 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6,
+ 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50,
+ 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0,
+ 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3,
+ 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56,
+ 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1,
+ 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64,
+ 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0,
+ 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0,
+ 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73,
+ 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1,
+ 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79,
+ 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0,
+ 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87,
+ 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0,
+ 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5,
+ 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94,
+ 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0,
+ 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100,
+ 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10,
+ 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0,
+ 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0,
+ 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111,
+ 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114,
+ 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10,
+ 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0,
+ 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0,
+ 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124,
+ 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124,
+ 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0,
+ 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0,
+ 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134,
+ 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137,
+ 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0,
+ 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0,
+ 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145,
+ 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149,
+ 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0,
+ 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0,
+ 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0,
+ 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157,
+ 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162,
+ 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0,
+ 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0,
+ 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168,
+ 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173,
+ 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1,
+ 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0,
+ 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179,
+ 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144,
+ 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0,
+ 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0,
+ 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187,
+ 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1,
+ 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28,
+ 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0,
+ 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199,
+ 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5,
+ 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2,
+ 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0,
+ 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208,
+ 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0,
+ 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0,
+ 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219,
+ 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223,
+ 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0,
+ 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0,
+ 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230,
+ 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1,
+ 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0,
+ 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0,
+ 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241,
+ 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249,
+ 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5,
+ 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0,
+ 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248,
+ 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48,
+ 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146,
+ 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235,
+ 240, 248,
+ }
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
}
// CELParserInit initializes any static state used to implement CELParser. By default the
@@ -182,8 +181,8 @@ func celParserInit() {
// NewCELParser(). You can call this function if you wish to initialize the static state ahead
// of time.
func CELParserInit() {
- staticData := &CELParserStaticData
- staticData.once.Do(celParserInit)
+ staticData := &celParserStaticData
+ staticData.once.Do(celParserInit)
}
// NewCELParser produces a new parser instance for the optional input antlr.TokenStream.
@@ -191,76 +190,75 @@ func NewCELParser(input antlr.TokenStream) *CELParser {
CELParserInit()
this := new(CELParser)
this.BaseParser = antlr.NewBaseParser(input)
- staticData := &CELParserStaticData
- this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
- this.RuleNames = staticData.RuleNames
- this.LiteralNames = staticData.LiteralNames
- this.SymbolicNames = staticData.SymbolicNames
+ staticData := &celParserStaticData
+ this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
+ this.RuleNames = staticData.ruleNames
+ this.LiteralNames = staticData.literalNames
+ this.SymbolicNames = staticData.symbolicNames
this.GrammarFileName = "CEL.g4"
return this
}
-
// CELParser tokens.
const (
- CELParserEOF = antlr.TokenEOF
- CELParserEQUALS = 1
- CELParserNOT_EQUALS = 2
- CELParserIN = 3
- CELParserLESS = 4
- CELParserLESS_EQUALS = 5
+ CELParserEOF = antlr.TokenEOF
+ CELParserEQUALS = 1
+ CELParserNOT_EQUALS = 2
+ CELParserIN = 3
+ CELParserLESS = 4
+ CELParserLESS_EQUALS = 5
CELParserGREATER_EQUALS = 6
- CELParserGREATER = 7
- CELParserLOGICAL_AND = 8
- CELParserLOGICAL_OR = 9
- CELParserLBRACKET = 10
- CELParserRPRACKET = 11
- CELParserLBRACE = 12
- CELParserRBRACE = 13
- CELParserLPAREN = 14
- CELParserRPAREN = 15
- CELParserDOT = 16
- CELParserCOMMA = 17
- CELParserMINUS = 18
- CELParserEXCLAM = 19
- CELParserQUESTIONMARK = 20
- CELParserCOLON = 21
- CELParserPLUS = 22
- CELParserSTAR = 23
- CELParserSLASH = 24
- CELParserPERCENT = 25
- CELParserCEL_TRUE = 26
- CELParserCEL_FALSE = 27
- CELParserNUL = 28
- CELParserWHITESPACE = 29
- CELParserCOMMENT = 30
- CELParserNUM_FLOAT = 31
- CELParserNUM_INT = 32
- CELParserNUM_UINT = 33
- CELParserSTRING = 34
- CELParserBYTES = 35
- CELParserIDENTIFIER = 36
+ CELParserGREATER = 7
+ CELParserLOGICAL_AND = 8
+ CELParserLOGICAL_OR = 9
+ CELParserLBRACKET = 10
+ CELParserRPRACKET = 11
+ CELParserLBRACE = 12
+ CELParserRBRACE = 13
+ CELParserLPAREN = 14
+ CELParserRPAREN = 15
+ CELParserDOT = 16
+ CELParserCOMMA = 17
+ CELParserMINUS = 18
+ CELParserEXCLAM = 19
+ CELParserQUESTIONMARK = 20
+ CELParserCOLON = 21
+ CELParserPLUS = 22
+ CELParserSTAR = 23
+ CELParserSLASH = 24
+ CELParserPERCENT = 25
+ CELParserCEL_TRUE = 26
+ CELParserCEL_FALSE = 27
+ CELParserNUL = 28
+ CELParserWHITESPACE = 29
+ CELParserCOMMENT = 30
+ CELParserNUM_FLOAT = 31
+ CELParserNUM_INT = 32
+ CELParserNUM_UINT = 33
+ CELParserSTRING = 34
+ CELParserBYTES = 35
+ CELParserIDENTIFIER = 36
)
// CELParser rules.
const (
- CELParserRULE_start = 0
- CELParserRULE_expr = 1
- CELParserRULE_conditionalOr = 2
- CELParserRULE_conditionalAnd = 3
- CELParserRULE_relation = 4
- CELParserRULE_calc = 5
- CELParserRULE_unary = 6
- CELParserRULE_member = 7
- CELParserRULE_primary = 8
- CELParserRULE_exprList = 9
- CELParserRULE_listInit = 10
+ CELParserRULE_start = 0
+ CELParserRULE_expr = 1
+ CELParserRULE_conditionalOr = 2
+ CELParserRULE_conditionalAnd = 3
+ CELParserRULE_relation = 4
+ CELParserRULE_calc = 5
+ CELParserRULE_unary = 6
+ CELParserRULE_member = 7
+ CELParserRULE_primary = 8
+ CELParserRULE_exprList = 9
+ CELParserRULE_listInit = 10
CELParserRULE_fieldInitializerList = 11
- CELParserRULE_optField = 12
- CELParserRULE_mapInitializerList = 13
- CELParserRULE_optExpr = 14
- CELParserRULE_literal = 15
+ CELParserRULE_optField = 12
+ CELParserRULE_mapInitializerList = 13
+ CELParserRULE_optExpr = 14
+ CELParserRULE_literal = 15
)
// IStartContext is an interface to support dynamic dispatch.
@@ -273,11 +271,9 @@ type IStartContext interface {
// GetE returns the e rule contexts.
GetE() IExprContext
-
// SetE sets the e rule contexts.
SetE(IExprContext)
-
// Getter signatures
EOF() antlr.TerminalNode
Expr() IExprContext
@@ -287,29 +283,24 @@ type IStartContext interface {
}
type StartContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- e IExprContext
+ e IExprContext
}
func NewEmptyStartContext() *StartContext {
var p = new(StartContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_start
return p
}
-func InitEmptyStartContext(p *StartContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_start
-}
-
func (*StartContext) IsStartContext() {}
func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext {
var p = new(StartContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_start
@@ -321,19 +312,17 @@ func (s *StartContext) GetParser() antlr.Parser { return s.parser }
func (s *StartContext) GetE() IExprContext { return s.e }
-
func (s *StartContext) SetE(v IExprContext) { s.e = v }
-
func (s *StartContext) EOF() antlr.TerminalNode {
return s.GetToken(CELParserEOF, 0)
}
func (s *StartContext) Expr() IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -353,7 +342,6 @@ func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer)
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterStart(s)
@@ -376,46 +364,45 @@ func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
+func (p *CELParser) Start() (localctx IStartContext) {
+ this := p
+ _ = this
-
-
-func (p *CELParser) Start_() (localctx IStartContext) {
localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 0, CELParserRULE_start)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
{
p.SetState(32)
var _x = p.Expr()
-
localctx.(*StartContext).e = _x
}
{
p.SetState(33)
p.Match(CELParserEOF)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IExprContext is an interface to support dynamic dispatch.
type IExprContext interface {
antlr.ParserRuleContext
@@ -424,12 +411,10 @@ type IExprContext interface {
GetParser() antlr.Parser
// GetOp returns the op token.
- GetOp() antlr.Token
-
+ GetOp() antlr.Token
// SetOp sets the op token.
- SetOp(antlr.Token)
-
+ SetOp(antlr.Token)
// GetE returns the e rule contexts.
GetE() IConditionalOrContext
@@ -440,7 +425,6 @@ type IExprContext interface {
// GetE2 returns the e2 rule contexts.
GetE2() IExprContext
-
// SetE sets the e rule contexts.
SetE(IConditionalOrContext)
@@ -450,7 +434,6 @@ type IExprContext interface {
// SetE2 sets the e2 rule contexts.
SetE2(IExprContext)
-
// Getter signatures
AllConditionalOr() []IConditionalOrContext
ConditionalOr(i int) IConditionalOrContext
@@ -463,32 +446,27 @@ type IExprContext interface {
}
type ExprContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- e IConditionalOrContext
- op antlr.Token
- e1 IConditionalOrContext
- e2 IExprContext
+ e IConditionalOrContext
+ op antlr.Token
+ e1 IConditionalOrContext
+ e2 IExprContext
}
func NewEmptyExprContext() *ExprContext {
var p = new(ExprContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_expr
return p
}
-func InitEmptyExprContext(p *ExprContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_expr
-}
-
func (*ExprContext) IsExprContext() {}
func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext {
var p = new(ExprContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_expr
@@ -500,24 +478,20 @@ func (s *ExprContext) GetParser() antlr.Parser { return s.parser }
func (s *ExprContext) GetOp() antlr.Token { return s.op }
-
func (s *ExprContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *ExprContext) GetE() IConditionalOrContext { return s.e }
func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 }
func (s *ExprContext) GetE2() IExprContext { return s.e2 }
-
func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v }
func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v }
func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v }
-
func (s *ExprContext) AllConditionalOr() []IConditionalOrContext {
children := s.GetChildren()
len := 0
@@ -540,12 +514,12 @@ func (s *ExprContext) AllConditionalOr() []IConditionalOrContext {
}
func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IConditionalOrContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -568,10 +542,10 @@ func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode {
}
func (s *ExprContext) Expr() IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -591,7 +565,6 @@ func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterExpr(s)
@@ -614,31 +587,42 @@ func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
func (p *CELParser) Expr() (localctx IExprContext) {
+ this := p
+ _ = this
+
localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 2, CELParserRULE_expr)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
{
p.SetState(35)
var _x = p.ConditionalOr()
-
localctx.(*ExprContext).e = _x
}
p.SetState(41)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserQUESTIONMARK {
{
p.SetState(36)
@@ -646,54 +630,31 @@ func (p *CELParser) Expr() (localctx IExprContext) {
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*ExprContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(37)
var _x = p.ConditionalOr()
-
localctx.(*ExprContext).e1 = _x
}
{
p.SetState(38)
p.Match(CELParserCOLON)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(39)
var _x = p.Expr()
-
localctx.(*ExprContext).e2 = _x
}
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IConditionalOrContext is an interface to support dynamic dispatch.
type IConditionalOrContext interface {
antlr.ParserRuleContext
@@ -702,42 +663,34 @@ type IConditionalOrContext interface {
GetParser() antlr.Parser
// GetS9 returns the s9 token.
- GetS9() antlr.Token
-
+ GetS9() antlr.Token
// SetS9 sets the s9 token.
- SetS9(antlr.Token)
-
+ SetS9(antlr.Token)
// GetOps returns the ops token list.
GetOps() []antlr.Token
-
// SetOps sets the ops token list.
SetOps([]antlr.Token)
-
// GetE returns the e rule contexts.
GetE() IConditionalAndContext
// Get_conditionalAnd returns the _conditionalAnd rule contexts.
Get_conditionalAnd() IConditionalAndContext
-
// SetE sets the e rule contexts.
SetE(IConditionalAndContext)
// Set_conditionalAnd sets the _conditionalAnd rule contexts.
Set_conditionalAnd(IConditionalAndContext)
-
// GetE1 returns the e1 rule context list.
GetE1() []IConditionalAndContext
-
// SetE1 sets the e1 rule context list.
- SetE1([]IConditionalAndContext)
-
+ SetE1([]IConditionalAndContext)
// Getter signatures
AllConditionalAnd() []IConditionalAndContext
@@ -750,33 +703,28 @@ type IConditionalOrContext interface {
}
type ConditionalOrContext struct {
- antlr.BaseParserRuleContext
- parser antlr.Parser
- e IConditionalAndContext
- s9 antlr.Token
- ops []antlr.Token
- _conditionalAnd IConditionalAndContext
- e1 []IConditionalAndContext
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalAndContext
+ s9 antlr.Token
+ ops []antlr.Token
+ _conditionalAnd IConditionalAndContext
+ e1 []IConditionalAndContext
}
func NewEmptyConditionalOrContext() *ConditionalOrContext {
var p = new(ConditionalOrContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_conditionalOr
return p
}
-func InitEmptyConditionalOrContext(p *ConditionalOrContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_conditionalOr
-}
-
func (*ConditionalOrContext) IsConditionalOrContext() {}
func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext {
var p = new(ConditionalOrContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_conditionalOr
@@ -788,32 +736,24 @@ func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser }
func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 }
-
func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
-
func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
-
func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
-
func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e }
func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd }
-
func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v }
func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v }
-
func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
-
func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
-
func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
children := s.GetChildren()
len := 0
@@ -836,12 +776,12 @@ func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
}
func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IConditionalAndContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -871,7 +811,6 @@ func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Reco
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterConditionalOr(s)
@@ -894,31 +833,42 @@ func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{
}
}
-
-
-
func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
+ this := p
+ _ = this
+
localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 4, CELParserRULE_conditionalOr)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
{
p.SetState(43)
var _x = p.ConditionalAnd()
-
localctx.(*ConditionalOrContext).e = _x
}
p.SetState(48)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
for _la == CELParserLOGICAL_OR {
{
p.SetState(44)
@@ -926,10 +876,6 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
var _m = p.Match(CELParserLOGICAL_OR)
localctx.(*ConditionalOrContext).s9 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
{
@@ -937,36 +883,18 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
var _x = p.ConditionalAnd()
-
localctx.(*ConditionalOrContext)._conditionalAnd = _x
}
localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
-
p.SetState(50)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IConditionalAndContext is an interface to support dynamic dispatch.
type IConditionalAndContext interface {
antlr.ParserRuleContext
@@ -975,42 +903,34 @@ type IConditionalAndContext interface {
GetParser() antlr.Parser
// GetS8 returns the s8 token.
- GetS8() antlr.Token
-
+ GetS8() antlr.Token
// SetS8 sets the s8 token.
- SetS8(antlr.Token)
-
+ SetS8(antlr.Token)
// GetOps returns the ops token list.
GetOps() []antlr.Token
-
// SetOps sets the ops token list.
SetOps([]antlr.Token)
-
// GetE returns the e rule contexts.
GetE() IRelationContext
// Get_relation returns the _relation rule contexts.
Get_relation() IRelationContext
-
// SetE sets the e rule contexts.
SetE(IRelationContext)
// Set_relation sets the _relation rule contexts.
Set_relation(IRelationContext)
-
// GetE1 returns the e1 rule context list.
GetE1() []IRelationContext
-
// SetE1 sets the e1 rule context list.
- SetE1([]IRelationContext)
-
+ SetE1([]IRelationContext)
// Getter signatures
AllRelation() []IRelationContext
@@ -1023,33 +943,28 @@ type IConditionalAndContext interface {
}
type ConditionalAndContext struct {
- antlr.BaseParserRuleContext
- parser antlr.Parser
- e IRelationContext
- s8 antlr.Token
- ops []antlr.Token
- _relation IRelationContext
- e1 []IRelationContext
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IRelationContext
+ s8 antlr.Token
+ ops []antlr.Token
+ _relation IRelationContext
+ e1 []IRelationContext
}
func NewEmptyConditionalAndContext() *ConditionalAndContext {
var p = new(ConditionalAndContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_conditionalAnd
return p
}
-func InitEmptyConditionalAndContext(p *ConditionalAndContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_conditionalAnd
-}
-
func (*ConditionalAndContext) IsConditionalAndContext() {}
func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext {
var p = new(ConditionalAndContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_conditionalAnd
@@ -1061,32 +976,24 @@ func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser }
func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 }
-
func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }
-
func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }
-
func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
-
func (s *ConditionalAndContext) GetE() IRelationContext { return s.e }
func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation }
-
func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v }
func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v }
-
func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
-
func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
-
func (s *ConditionalAndContext) AllRelation() []IRelationContext {
children := s.GetChildren()
len := 0
@@ -1109,12 +1016,12 @@ func (s *ConditionalAndContext) AllRelation() []IRelationContext {
}
func (s *ConditionalAndContext) Relation(i int) IRelationContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IRelationContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -1144,7 +1051,6 @@ func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Rec
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterConditionalAnd(s)
@@ -1167,14 +1073,30 @@ func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface
}
}
-
-
-
func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
+ this := p
+ _ = this
+
localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
{
p.SetState(51)
@@ -1185,12 +1107,8 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
}
p.SetState(56)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
for _la == CELParserLOGICAL_AND {
{
p.SetState(52)
@@ -1198,10 +1116,6 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
var _m = p.Match(CELParserLOGICAL_AND)
localctx.(*ConditionalAndContext).s8 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
{
@@ -1213,31 +1127,14 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
}
localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
-
p.SetState(58)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IRelationContext is an interface to support dynamic dispatch.
type IRelationContext interface {
antlr.ParserRuleContext
@@ -1246,12 +1143,10 @@ type IRelationContext interface {
GetParser() antlr.Parser
// GetOp returns the op token.
- GetOp() antlr.Token
-
+ GetOp() antlr.Token
// SetOp sets the op token.
- SetOp(antlr.Token)
-
+ SetOp(antlr.Token)
// Getter signatures
Calc() ICalcContext
@@ -1270,29 +1165,24 @@ type IRelationContext interface {
}
type RelationContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- op antlr.Token
+ op antlr.Token
}
func NewEmptyRelationContext() *RelationContext {
var p = new(RelationContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_relation
return p
}
-func InitEmptyRelationContext(p *RelationContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_relation
-}
-
func (*RelationContext) IsRelationContext() {}
func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext {
var p = new(RelationContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_relation
@@ -1304,15 +1194,13 @@ func (s *RelationContext) GetParser() antlr.Parser { return s.parser }
func (s *RelationContext) GetOp() antlr.Token { return s.op }
-
func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *RelationContext) Calc() ICalcContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(ICalcContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -1346,12 +1234,12 @@ func (s *RelationContext) AllRelation() []IRelationContext {
}
func (s *RelationContext) Relation(i int) IRelationContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IRelationContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -1401,7 +1289,6 @@ func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognize
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterRelation(s)
@@ -1424,17 +1311,15 @@ func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
-
func (p *CELParser) Relation() (localctx IRelationContext) {
return p.relation(0)
}
func (p *CELParser) relation(_p int) (localctx IRelationContext) {
- var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ this := p
+ _ = this
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
_parentState := p.GetState()
localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState)
var _prevctx IRelationContext = localctx
@@ -1443,6 +1328,22 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p)
var _la int
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.EnterOuterAlt(localctx, 1)
@@ -1454,13 +1355,8 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
p.SetState(67)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
+
for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
if _alt == 1 {
if p.GetParseListeners() != nil {
@@ -1472,8 +1368,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
p.SetState(62)
if !(p.Precpred(p.GetParserRuleContext(), 1)) {
- p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
- goto errorExit
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
}
{
p.SetState(63)
@@ -1484,7 +1379,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
_la = p.GetTokenStream().LA(1)
- if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) {
+ if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&254) != 0) {
var _ri = p.GetErrorHandler().RecoverInline(p)
localctx.(*RelationContext).op = _ri
@@ -1498,35 +1393,15 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
p.relation(2)
}
-
}
p.SetState(69)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
}
-
-
- errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.UnrollRecursionContexts(_parentctx)
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// ICalcContext is an interface to support dynamic dispatch.
type ICalcContext interface {
antlr.ParserRuleContext
@@ -1535,12 +1410,10 @@ type ICalcContext interface {
GetParser() antlr.Parser
// GetOp returns the op token.
- GetOp() antlr.Token
-
+ GetOp() antlr.Token
// SetOp sets the op token.
- SetOp(antlr.Token)
-
+ SetOp(antlr.Token)
// Getter signatures
Unary() IUnaryContext
@@ -1557,29 +1430,24 @@ type ICalcContext interface {
}
type CalcContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- op antlr.Token
+ op antlr.Token
}
func NewEmptyCalcContext() *CalcContext {
var p = new(CalcContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_calc
return p
}
-func InitEmptyCalcContext(p *CalcContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_calc
-}
-
func (*CalcContext) IsCalcContext() {}
func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext {
var p = new(CalcContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_calc
@@ -1591,15 +1459,13 @@ func (s *CalcContext) GetParser() antlr.Parser { return s.parser }
func (s *CalcContext) GetOp() antlr.Token { return s.op }
-
func (s *CalcContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *CalcContext) Unary() IUnaryContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IUnaryContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -1633,12 +1499,12 @@ func (s *CalcContext) AllCalc() []ICalcContext {
}
func (s *CalcContext) Calc(i int) ICalcContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(ICalcContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -1680,7 +1546,6 @@ func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterCalc(s)
@@ -1703,17 +1568,15 @@ func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
-
func (p *CELParser) Calc() (localctx ICalcContext) {
return p.calc(0)
}
func (p *CELParser) calc(_p int) (localctx ICalcContext) {
- var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ this := p
+ _ = this
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
_parentState := p.GetState()
localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState)
var _prevctx ICalcContext = localctx
@@ -1722,6 +1585,22 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p)
var _la int
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.EnterOuterAlt(localctx, 1)
@@ -1733,13 +1612,8 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
p.SetState(81)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext())
+
for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
if _alt == 1 {
if p.GetParseListeners() != nil {
@@ -1748,19 +1622,14 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
_prevctx = localctx
p.SetState(79)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
-
- switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 4, p.GetParserRuleContext()) {
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) {
case 1:
localctx = NewCalcContext(p, _parentctx, _parentState)
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
p.SetState(73)
if !(p.Precpred(p.GetParserRuleContext(), 2)) {
- p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
- goto errorExit
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
}
{
p.SetState(74)
@@ -1771,7 +1640,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
_la = p.GetTokenStream().LA(1)
- if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) {
+ if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&58720256) != 0) {
var _ri = p.GetErrorHandler().RecoverInline(p)
localctx.(*CalcContext).op = _ri
@@ -1785,15 +1654,13 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
p.calc(3)
}
-
case 2:
localctx = NewCalcContext(p, _parentctx, _parentState)
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
p.SetState(76)
if !(p.Precpred(p.GetParserRuleContext(), 1)) {
- p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
- goto errorExit
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
}
{
p.SetState(77)
@@ -1818,38 +1685,17 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
p.calc(2)
}
- case antlr.ATNInvalidAltNumber:
- goto errorExit
}
}
p.SetState(83)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext())
}
-
-
- errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.UnrollRecursionContexts(_parentctx)
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IUnaryContext is an interface to support dynamic dispatch.
type IUnaryContext interface {
antlr.ParserRuleContext
@@ -1861,28 +1707,23 @@ type IUnaryContext interface {
}
type UnaryContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyUnaryContext() *UnaryContext {
var p = new(UnaryContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_unary
return p
}
-func InitEmptyUnaryContext(p *UnaryContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_unary
-}
-
func (*UnaryContext) IsUnaryContext() {}
func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext {
var p = new(UnaryContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_unary
@@ -1892,8 +1733,8 @@ func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invoki
func (s *UnaryContext) GetParser() antlr.Parser { return s.parser }
-func (s *UnaryContext) CopyAll(ctx *UnaryContext) {
- s.CopyFrom(&ctx.BaseParserRuleContext)
+func (s *UnaryContext) CopyFrom(ctx *UnaryContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
}
func (s *UnaryContext) GetRuleContext() antlr.RuleContext {
@@ -1904,11 +1745,8 @@ func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer)
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
-
-
type LogicalNotContext struct {
- UnaryContext
+ *UnaryContext
s19 antlr.Token
ops []antlr.Token
}
@@ -1916,23 +1754,19 @@ type LogicalNotContext struct {
func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext {
var p = new(LogicalNotContext)
- InitEmptyUnaryContext(&p.UnaryContext)
+ p.UnaryContext = NewEmptyUnaryContext()
p.parser = parser
- p.CopyAll(ctx.(*UnaryContext))
+ p.CopyFrom(ctx.(*UnaryContext))
return p
}
-
func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 }
-
func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v }
-
func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops }
-
func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v }
func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext {
@@ -1940,10 +1774,10 @@ func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext {
}
func (s *LogicalNotContext) Member() IMemberContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMemberContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -1963,7 +1797,6 @@ func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode {
return s.GetToken(CELParserEXCLAM, i)
}
-
func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterLogicalNot(s)
@@ -1986,17 +1819,16 @@ func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type MemberExprContext struct {
- UnaryContext
+ *UnaryContext
}
func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext {
var p = new(MemberExprContext)
- InitEmptyUnaryContext(&p.UnaryContext)
+ p.UnaryContext = NewEmptyUnaryContext()
p.parser = parser
- p.CopyAll(ctx.(*UnaryContext))
+ p.CopyFrom(ctx.(*UnaryContext))
return p
}
@@ -2006,10 +1838,10 @@ func (s *MemberExprContext) GetRuleContext() antlr.RuleContext {
}
func (s *MemberExprContext) Member() IMemberContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMemberContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2021,7 +1853,6 @@ func (s *MemberExprContext) Member() IMemberContext {
return t.(IMemberContext)
}
-
func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterMemberExpr(s)
@@ -2044,9 +1875,8 @@ func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type NegateContext struct {
- UnaryContext
+ *UnaryContext
s18 antlr.Token
ops []antlr.Token
}
@@ -2054,23 +1884,19 @@ type NegateContext struct {
func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext {
var p = new(NegateContext)
- InitEmptyUnaryContext(&p.UnaryContext)
+ p.UnaryContext = NewEmptyUnaryContext()
p.parser = parser
- p.CopyAll(ctx.(*UnaryContext))
+ p.CopyFrom(ctx.(*UnaryContext))
return p
}
-
func (s *NegateContext) GetS18() antlr.Token { return s.s18 }
-
func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v }
-
func (s *NegateContext) GetOps() []antlr.Token { return s.ops }
-
func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v }
func (s *NegateContext) GetRuleContext() antlr.RuleContext {
@@ -2078,10 +1904,10 @@ func (s *NegateContext) GetRuleContext() antlr.RuleContext {
}
func (s *NegateContext) Member() IMemberContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMemberContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2101,7 +1927,6 @@ func (s *NegateContext) MINUS(i int) antlr.TerminalNode {
return s.GetToken(CELParserMINUS, i)
}
-
func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterNegate(s)
@@ -2124,22 +1949,35 @@ func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
func (p *CELParser) Unary() (localctx IUnaryContext) {
+ this := p
+ _ = this
+
localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 12, CELParserRULE_unary)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.SetState(97)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
-
- switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 8, p.GetParserRuleContext()) {
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) {
case 1:
localctx = NewMemberExprContext(p, localctx)
p.EnterOuterAlt(localctx, 1)
@@ -2148,18 +1986,13 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
p.member(0)
}
-
case 2:
localctx = NewLogicalNotContext(p, localctx)
p.EnterOuterAlt(localctx, 2)
p.SetState(86)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
for ok := true; ok; ok = _la == CELParserEXCLAM {
{
p.SetState(85)
@@ -2167,19 +2000,11 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
var _m = p.Match(CELParserEXCLAM)
localctx.(*LogicalNotContext).s19 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19)
-
p.SetState(88)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
}
{
@@ -2187,71 +2012,42 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
p.member(0)
}
-
case 3:
localctx = NewNegateContext(p, localctx)
p.EnterOuterAlt(localctx, 3)
p.SetState(92)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_alt = 1
for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
switch _alt {
case 1:
- {
- p.SetState(91)
-
- var _m = p.Match(CELParserMINUS)
-
- localctx.(*NegateContext).s18 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
- }
- localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)
-
+ {
+ p.SetState(91)
+ var _m = p.Match(CELParserMINUS)
+ localctx.(*NegateContext).s18 = _m
+ }
+ localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)
default:
- p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
- goto errorExit
+ panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
}
p.SetState(94)
p.GetErrorHandler().Sync(p)
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext())
}
{
p.SetState(96)
p.member(0)
}
- case antlr.ATNInvalidAltNumber:
- goto errorExit
}
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IMemberContext is an interface to support dynamic dispatch.
type IMemberContext interface {
antlr.ParserRuleContext
@@ -2263,28 +2059,23 @@ type IMemberContext interface {
}
type MemberContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyMemberContext() *MemberContext {
var p = new(MemberContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_member
return p
}
-func InitEmptyMemberContext(p *MemberContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_member
-}
-
func (*MemberContext) IsMemberContext() {}
func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext {
var p = new(MemberContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_member
@@ -2294,8 +2085,8 @@ func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invok
func (s *MemberContext) GetParser() antlr.Parser { return s.parser }
-func (s *MemberContext) CopyAll(ctx *MemberContext) {
- s.CopyFrom(&ctx.BaseParserRuleContext)
+func (s *MemberContext) CopyFrom(ctx *MemberContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
}
func (s *MemberContext) GetRuleContext() antlr.RuleContext {
@@ -2306,46 +2097,38 @@ func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer)
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
-
-
-
type MemberCallContext struct {
- MemberContext
- op antlr.Token
- id antlr.Token
+ *MemberContext
+ op antlr.Token
+ id antlr.Token
open antlr.Token
- args IExprListContext
+ args IExprListContext
}
func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext {
var p = new(MemberCallContext)
- InitEmptyMemberContext(&p.MemberContext)
+ p.MemberContext = NewEmptyMemberContext()
p.parser = parser
- p.CopyAll(ctx.(*MemberContext))
+ p.CopyFrom(ctx.(*MemberContext))
return p
}
-
func (s *MemberCallContext) GetOp() antlr.Token { return s.op }
func (s *MemberCallContext) GetId() antlr.Token { return s.id }
func (s *MemberCallContext) GetOpen() antlr.Token { return s.open }
-
func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v }
func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v }
func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v }
-
func (s *MemberCallContext) GetArgs() IExprListContext { return s.args }
-
func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v }
func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
@@ -2353,10 +2136,10 @@ func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
}
func (s *MemberCallContext) Member() IMemberContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMemberContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2385,10 +2168,10 @@ func (s *MemberCallContext) LPAREN() antlr.TerminalNode {
}
func (s *MemberCallContext) ExprList() IExprListContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprListContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2400,7 +2183,6 @@ func (s *MemberCallContext) ExprList() IExprListContext {
return t.(IExprListContext)
}
-
func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterMemberCall(s)
@@ -2423,32 +2205,29 @@ func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type SelectContext struct {
- MemberContext
- op antlr.Token
+ *MemberContext
+ op antlr.Token
opt antlr.Token
- id antlr.Token
+ id antlr.Token
}
func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext {
var p = new(SelectContext)
- InitEmptyMemberContext(&p.MemberContext)
+ p.MemberContext = NewEmptyMemberContext()
p.parser = parser
- p.CopyAll(ctx.(*MemberContext))
+ p.CopyFrom(ctx.(*MemberContext))
return p
}
-
func (s *SelectContext) GetOp() antlr.Token { return s.op }
func (s *SelectContext) GetOpt() antlr.Token { return s.opt }
func (s *SelectContext) GetId() antlr.Token { return s.id }
-
func (s *SelectContext) SetOp(v antlr.Token) { s.op = v }
func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v }
@@ -2460,10 +2239,10 @@ func (s *SelectContext) GetRuleContext() antlr.RuleContext {
}
func (s *SelectContext) Member() IMemberContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMemberContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2487,7 +2266,6 @@ func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode {
return s.GetToken(CELParserQUESTIONMARK, 0)
}
-
func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterSelect(s)
@@ -2510,17 +2288,16 @@ func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type PrimaryExprContext struct {
- MemberContext
+ *MemberContext
}
func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext {
var p = new(PrimaryExprContext)
- InitEmptyMemberContext(&p.MemberContext)
+ p.MemberContext = NewEmptyMemberContext()
p.parser = parser
- p.CopyAll(ctx.(*MemberContext))
+ p.CopyFrom(ctx.(*MemberContext))
return p
}
@@ -2530,10 +2307,10 @@ func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext {
}
func (s *PrimaryExprContext) Primary() IPrimaryContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IPrimaryContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2545,7 +2322,6 @@ func (s *PrimaryExprContext) Primary() IPrimaryContext {
return t.(IPrimaryContext)
}
-
func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterPrimaryExpr(s)
@@ -2568,38 +2344,33 @@ func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{}
}
}
-
type IndexContext struct {
- MemberContext
- op antlr.Token
- opt antlr.Token
- index IExprContext
+ *MemberContext
+ op antlr.Token
+ opt antlr.Token
+ index IExprContext
}
func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext {
var p = new(IndexContext)
- InitEmptyMemberContext(&p.MemberContext)
+ p.MemberContext = NewEmptyMemberContext()
p.parser = parser
- p.CopyAll(ctx.(*MemberContext))
+ p.CopyFrom(ctx.(*MemberContext))
return p
}
-
func (s *IndexContext) GetOp() antlr.Token { return s.op }
func (s *IndexContext) GetOpt() antlr.Token { return s.opt }
-
func (s *IndexContext) SetOp(v antlr.Token) { s.op = v }
func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v }
-
func (s *IndexContext) GetIndex() IExprContext { return s.index }
-
func (s *IndexContext) SetIndex(v IExprContext) { s.index = v }
func (s *IndexContext) GetRuleContext() antlr.RuleContext {
@@ -2607,10 +2378,10 @@ func (s *IndexContext) GetRuleContext() antlr.RuleContext {
}
func (s *IndexContext) Member() IMemberContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMemberContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2631,10 +2402,10 @@ func (s *IndexContext) LBRACKET() antlr.TerminalNode {
}
func (s *IndexContext) Expr() IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -2650,7 +2421,6 @@ func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode {
return s.GetToken(CELParserQUESTIONMARK, 0)
}
-
func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterIndex(s)
@@ -2673,15 +2443,15 @@ func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
func (p *CELParser) Member() (localctx IMemberContext) {
return p.member(0)
}
func (p *CELParser) member(_p int) (localctx IMemberContext) {
- var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ this := p
+ _ = this
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
_parentState := p.GetState()
localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState)
var _prevctx IMemberContext = localctx
@@ -2690,6 +2460,22 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p)
var _la int
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.EnterOuterAlt(localctx, 1)
@@ -2705,13 +2491,8 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
p.SetState(126)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext())
+
for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
if _alt == 1 {
if p.GetParseListeners() != nil {
@@ -2720,19 +2501,14 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
_prevctx = localctx
p.SetState(124)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
-
- switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 12, p.GetParserRuleContext()) {
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 12, p.GetParserRuleContext()) {
case 1:
localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState))
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
p.SetState(102)
if !(p.Precpred(p.GetParserRuleContext(), 3)) {
- p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", ""))
- goto errorExit
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", ""))
}
{
p.SetState(103)
@@ -2740,19 +2516,11 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserDOT)
localctx.(*SelectContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(105)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserQUESTIONMARK {
{
p.SetState(104)
@@ -2760,10 +2528,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*SelectContext).opt = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -2773,21 +2537,15 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserIDENTIFIER)
localctx.(*SelectContext).id = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 2:
localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState))
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
p.SetState(108)
if !(p.Precpred(p.GetParserRuleContext(), 2)) {
- p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
- goto errorExit
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
}
{
p.SetState(109)
@@ -2795,10 +2553,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserDOT)
localctx.(*MemberCallContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(110)
@@ -2806,10 +2560,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserIDENTIFIER)
localctx.(*MemberCallContext).id = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(111)
@@ -2817,26 +2567,17 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserLPAREN)
localctx.(*MemberCallContext).open = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(113)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
- if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) {
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 {
{
p.SetState(112)
var _x = p.ExprList()
-
localctx.(*MemberCallContext).args = _x
}
@@ -2844,21 +2585,15 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
{
p.SetState(115)
p.Match(CELParserRPAREN)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 3:
localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState))
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
p.SetState(116)
if !(p.Precpred(p.GetParserRuleContext(), 1)) {
- p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
- goto errorExit
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
}
{
p.SetState(117)
@@ -2866,19 +2601,11 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserLBRACKET)
localctx.(*IndexContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(119)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserQUESTIONMARK {
{
p.SetState(118)
@@ -2886,10 +2613,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*IndexContext).opt = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -2898,50 +2621,24 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
var _x = p.Expr()
-
localctx.(*IndexContext).index = _x
}
{
p.SetState(122)
p.Match(CELParserRPRACKET)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
- case antlr.ATNInvalidAltNumber:
- goto errorExit
}
}
p.SetState(128)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext())
}
-
-
- errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.UnrollRecursionContexts(_parentctx)
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IPrimaryContext is an interface to support dynamic dispatch.
type IPrimaryContext interface {
antlr.ParserRuleContext
@@ -2953,28 +2650,23 @@ type IPrimaryContext interface {
}
type PrimaryContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyPrimaryContext() *PrimaryContext {
var p = new(PrimaryContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_primary
return p
}
-func InitEmptyPrimaryContext(p *PrimaryContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_primary
-}
-
func (*PrimaryContext) IsPrimaryContext() {}
func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext {
var p = new(PrimaryContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_primary
@@ -2984,8 +2676,8 @@ func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invo
func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser }
-func (s *PrimaryContext) CopyAll(ctx *PrimaryContext) {
- s.CopyFrom(&ctx.BaseParserRuleContext)
+func (s *PrimaryContext) CopyFrom(ctx *PrimaryContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
}
func (s *PrimaryContext) GetRuleContext() antlr.RuleContext {
@@ -2996,35 +2688,28 @@ func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
-
-
type CreateListContext struct {
- PrimaryContext
- op antlr.Token
- elems IListInitContext
+ *PrimaryContext
+ op antlr.Token
+ elems IListInitContext
}
func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext {
var p = new(CreateListContext)
- InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.PrimaryContext = NewEmptyPrimaryContext()
p.parser = parser
- p.CopyAll(ctx.(*PrimaryContext))
+ p.CopyFrom(ctx.(*PrimaryContext))
return p
}
-
func (s *CreateListContext) GetOp() antlr.Token { return s.op }
-
func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *CreateListContext) GetElems() IListInitContext { return s.elems }
-
func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v }
func (s *CreateListContext) GetRuleContext() antlr.RuleContext {
@@ -3044,10 +2729,10 @@ func (s *CreateListContext) COMMA() antlr.TerminalNode {
}
func (s *CreateListContext) ListInit() IListInitContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IListInitContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -3059,7 +2744,6 @@ func (s *CreateListContext) ListInit() IListInitContext {
return t.(IListInitContext)
}
-
func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterCreateList(s)
@@ -3082,33 +2766,28 @@ func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type CreateStructContext struct {
- PrimaryContext
- op antlr.Token
- entries IMapInitializerListContext
+ *PrimaryContext
+ op antlr.Token
+ entries IMapInitializerListContext
}
func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext {
var p = new(CreateStructContext)
- InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.PrimaryContext = NewEmptyPrimaryContext()
p.parser = parser
- p.CopyAll(ctx.(*PrimaryContext))
+ p.CopyFrom(ctx.(*PrimaryContext))
return p
}
-
func (s *CreateStructContext) GetOp() antlr.Token { return s.op }
-
func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries }
-
func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v }
func (s *CreateStructContext) GetRuleContext() antlr.RuleContext {
@@ -3128,10 +2807,10 @@ func (s *CreateStructContext) COMMA() antlr.TerminalNode {
}
func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IMapInitializerListContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -3143,7 +2822,6 @@ func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
return t.(IMapInitializerListContext)
}
-
func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterCreateStruct(s)
@@ -3166,17 +2844,16 @@ func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{}
}
}
-
type ConstantLiteralContext struct {
- PrimaryContext
+ *PrimaryContext
}
func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext {
var p = new(ConstantLiteralContext)
- InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.PrimaryContext = NewEmptyPrimaryContext()
p.parser = parser
- p.CopyAll(ctx.(*PrimaryContext))
+ p.CopyFrom(ctx.(*PrimaryContext))
return p
}
@@ -3186,10 +2863,10 @@ func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext {
}
func (s *ConstantLiteralContext) Literal() ILiteralContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(ILiteralContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -3201,7 +2878,6 @@ func (s *ConstantLiteralContext) Literal() ILiteralContext {
return t.(ILiteralContext)
}
-
func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterConstantLiteral(s)
@@ -3224,26 +2900,23 @@ func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interfac
}
}
-
type NestedContext struct {
- PrimaryContext
- e IExprContext
+ *PrimaryContext
+ e IExprContext
}
func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext {
var p = new(NestedContext)
- InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.PrimaryContext = NewEmptyPrimaryContext()
p.parser = parser
- p.CopyAll(ctx.(*PrimaryContext))
+ p.CopyFrom(ctx.(*PrimaryContext))
return p
}
-
func (s *NestedContext) GetE() IExprContext { return s.e }
-
func (s *NestedContext) SetE(v IExprContext) { s.e = v }
func (s *NestedContext) GetRuleContext() antlr.RuleContext {
@@ -3259,10 +2932,10 @@ func (s *NestedContext) RPAREN() antlr.TerminalNode {
}
func (s *NestedContext) Expr() IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -3274,7 +2947,6 @@ func (s *NestedContext) Expr() IExprContext {
return t.(IExprContext)
}
-
func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterNested(s)
@@ -3297,29 +2969,27 @@ func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type CreateMessageContext struct {
- PrimaryContext
- leadingDot antlr.Token
+ *PrimaryContext
+ leadingDot antlr.Token
_IDENTIFIER antlr.Token
- ids []antlr.Token
- s16 antlr.Token
- ops []antlr.Token
- op antlr.Token
- entries IFieldInitializerListContext
+ ids []antlr.Token
+ s16 antlr.Token
+ ops []antlr.Token
+ op antlr.Token
+ entries IFieldInitializerListContext
}
func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext {
var p = new(CreateMessageContext)
- InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.PrimaryContext = NewEmptyPrimaryContext()
p.parser = parser
- p.CopyAll(ctx.(*PrimaryContext))
+ p.CopyFrom(ctx.(*PrimaryContext))
return p
}
-
func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot }
func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER }
@@ -3328,7 +2998,6 @@ func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 }
func (s *CreateMessageContext) GetOp() antlr.Token { return s.op }
-
func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }
func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v }
@@ -3337,20 +3006,16 @@ func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v }
func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids }
func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops }
-
func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v }
func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v }
-
func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries }
-
func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v }
func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext {
@@ -3386,10 +3051,10 @@ func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode {
}
func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IFieldInitializerListContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -3401,7 +3066,6 @@ func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListConte
return t.(IFieldInitializerListContext)
}
-
func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterCreateMessage(s)
@@ -3424,43 +3088,38 @@ func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{
}
}
-
type IdentOrGlobalCallContext struct {
- PrimaryContext
+ *PrimaryContext
leadingDot antlr.Token
- id antlr.Token
- op antlr.Token
- args IExprListContext
+ id antlr.Token
+ op antlr.Token
+ args IExprListContext
}
func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext {
var p = new(IdentOrGlobalCallContext)
- InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.PrimaryContext = NewEmptyPrimaryContext()
p.parser = parser
- p.CopyAll(ctx.(*PrimaryContext))
+ p.CopyFrom(ctx.(*PrimaryContext))
return p
}
-
func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot }
func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id }
func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op }
-
func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }
func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v }
func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v }
-
func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args }
-
func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v }
func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext {
@@ -3484,10 +3143,10 @@ func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode {
}
func (s *IdentOrGlobalCallContext) ExprList() IExprListContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprListContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -3499,7 +3158,6 @@ func (s *IdentOrGlobalCallContext) ExprList() IExprListContext {
return t.(IExprListContext)
}
-
func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterIdentOrGlobalCall(s)
@@ -3522,31 +3180,40 @@ func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interf
}
}
-
-
func (p *CELParser) Primary() (localctx IPrimaryContext) {
+ this := p
+ _ = this
+
localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 16, CELParserRULE_primary)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.SetState(180)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
-
- switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) {
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 25, p.GetParserRuleContext()) {
case 1:
localctx = NewIdentOrGlobalCallContext(p, localctx)
p.EnterOuterAlt(localctx, 1)
p.SetState(130)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserDOT {
{
p.SetState(129)
@@ -3554,10 +3221,6 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserDOT)
localctx.(*IdentOrGlobalCallContext).leadingDot = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -3567,42 +3230,28 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserIDENTIFIER)
localctx.(*IdentOrGlobalCallContext).id = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(138)
p.GetErrorHandler().Sync(p)
-
- if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 {
+ if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 {
{
p.SetState(133)
var _m = p.Match(CELParserLPAREN)
localctx.(*IdentOrGlobalCallContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(135)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
- if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) {
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 {
{
p.SetState(134)
var _x = p.ExprList()
-
localctx.(*IdentOrGlobalCallContext).args = _x
}
@@ -3610,46 +3259,29 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
{
p.SetState(137)
p.Match(CELParserRPAREN)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
- } else if p.HasError() { // JIM
- goto errorExit
}
-
case 2:
localctx = NewNestedContext(p, localctx)
p.EnterOuterAlt(localctx, 2)
{
p.SetState(140)
p.Match(CELParserLPAREN)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(141)
var _x = p.Expr()
-
localctx.(*NestedContext).e = _x
}
{
p.SetState(142)
p.Match(CELParserRPAREN)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 3:
localctx = NewCreateListContext(p, localctx)
p.EnterOuterAlt(localctx, 3)
@@ -3659,59 +3291,37 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserLBRACKET)
localctx.(*CreateListContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(146)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
- if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) {
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 {
{
p.SetState(145)
var _x = p.ListInit()
-
localctx.(*CreateListContext).elems = _x
}
}
p.SetState(149)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserCOMMA {
{
p.SetState(148)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
{
p.SetState(151)
p.Match(CELParserRPRACKET)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 4:
localctx = NewCreateStructContext(p, localctx)
p.EnterOuterAlt(localctx, 4)
@@ -3721,70 +3331,44 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserLBRACE)
localctx.(*CreateStructContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(154)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
- if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) {
+ if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 {
{
p.SetState(153)
var _x = p.MapInitializerList()
-
localctx.(*CreateStructContext).entries = _x
}
}
p.SetState(157)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserCOMMA {
{
p.SetState(156)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
{
p.SetState(159)
p.Match(CELParserRBRACE)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 5:
localctx = NewCreateMessageContext(p, localctx)
p.EnterOuterAlt(localctx, 5)
p.SetState(161)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserDOT {
{
p.SetState(160)
@@ -3792,10 +3376,6 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserDOT)
localctx.(*CreateMessageContext).leadingDot = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -3805,20 +3385,12 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserIDENTIFIER)
localctx.(*CreateMessageContext)._IDENTIFIER = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
p.SetState(168)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
for _la == CELParserDOT {
{
p.SetState(164)
@@ -3826,10 +3398,6 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserDOT)
localctx.(*CreateMessageContext).s16 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16)
{
@@ -3838,19 +3406,11 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserIDENTIFIER)
localctx.(*CreateMessageContext)._IDENTIFIER = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
-
p.SetState(170)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
}
{
@@ -3859,59 +3419,37 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
var _m = p.Match(CELParserLBRACE)
localctx.(*CreateMessageContext).op = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
p.SetState(173)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER {
{
p.SetState(172)
var _x = p.FieldInitializerList()
-
localctx.(*CreateMessageContext).entries = _x
}
}
p.SetState(176)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserCOMMA {
{
p.SetState(175)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
{
p.SetState(178)
p.Match(CELParserRBRACE)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 6:
localctx = NewConstantLiteralContext(p, localctx)
p.EnterOuterAlt(localctx, 6)
@@ -3920,25 +3458,11 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
p.Literal()
}
- case antlr.ATNInvalidAltNumber:
- goto errorExit
}
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IExprListContext is an interface to support dynamic dispatch.
type IExprListContext interface {
antlr.ParserRuleContext
@@ -3949,18 +3473,14 @@ type IExprListContext interface {
// Get_expr returns the _expr rule contexts.
Get_expr() IExprContext
-
// Set_expr sets the _expr rule contexts.
Set_expr(IExprContext)
-
// GetE returns the e rule context list.
GetE() []IExprContext
-
// SetE sets the e rule context list.
- SetE([]IExprContext)
-
+ SetE([]IExprContext)
// Getter signatures
AllExpr() []IExprContext
@@ -3973,30 +3493,25 @@ type IExprListContext interface {
}
type ExprListContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- _expr IExprContext
- e []IExprContext
+ _expr IExprContext
+ e []IExprContext
}
func NewEmptyExprListContext() *ExprListContext {
var p = new(ExprListContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_exprList
return p
}
-func InitEmptyExprListContext(p *ExprListContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_exprList
-}
-
func (*ExprListContext) IsExprListContext() {}
func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext {
var p = new(ExprListContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_exprList
@@ -4008,16 +3523,12 @@ func (s *ExprListContext) GetParser() antlr.Parser { return s.parser }
func (s *ExprListContext) Get_expr() IExprContext { return s._expr }
-
func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v }
-
func (s *ExprListContext) GetE() []IExprContext { return s.e }
-
func (s *ExprListContext) SetE(v []IExprContext) { s.e = v }
-
func (s *ExprListContext) AllExpr() []IExprContext {
children := s.GetChildren()
len := 0
@@ -4040,12 +3551,12 @@ func (s *ExprListContext) AllExpr() []IExprContext {
}
func (s *ExprListContext) Expr(i int) IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -4075,7 +3586,6 @@ func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognize
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterExprList(s)
@@ -4098,76 +3608,65 @@ func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
func (p *CELParser) ExprList() (localctx IExprListContext) {
+ this := p
+ _ = this
+
localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 18, CELParserRULE_exprList)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
{
p.SetState(182)
var _x = p.Expr()
-
localctx.(*ExprListContext)._expr = _x
}
localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
p.SetState(187)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
for _la == CELParserCOMMA {
{
p.SetState(183)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(184)
var _x = p.Expr()
-
localctx.(*ExprListContext)._expr = _x
}
localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
-
p.SetState(189)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IListInitContext is an interface to support dynamic dispatch.
type IListInitContext interface {
antlr.ParserRuleContext
@@ -4178,18 +3677,14 @@ type IListInitContext interface {
// Get_optExpr returns the _optExpr rule contexts.
Get_optExpr() IOptExprContext
-
// Set_optExpr sets the _optExpr rule contexts.
Set_optExpr(IOptExprContext)
-
// GetElems returns the elems rule context list.
GetElems() []IOptExprContext
-
// SetElems sets the elems rule context list.
- SetElems([]IOptExprContext)
-
+ SetElems([]IOptExprContext)
// Getter signatures
AllOptExpr() []IOptExprContext
@@ -4202,30 +3697,25 @@ type IListInitContext interface {
}
type ListInitContext struct {
- antlr.BaseParserRuleContext
- parser antlr.Parser
- _optExpr IOptExprContext
- elems []IOptExprContext
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optExpr IOptExprContext
+ elems []IOptExprContext
}
func NewEmptyListInitContext() *ListInitContext {
var p = new(ListInitContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_listInit
return p
}
-func InitEmptyListInitContext(p *ListInitContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_listInit
-}
-
func (*ListInitContext) IsListInitContext() {}
func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext {
var p = new(ListInitContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_listInit
@@ -4237,16 +3727,12 @@ func (s *ListInitContext) GetParser() antlr.Parser { return s.parser }
func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr }
-
func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v }
-
func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems }
-
func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v }
-
func (s *ListInitContext) AllOptExpr() []IOptExprContext {
children := s.GetChildren()
len := 0
@@ -4269,12 +3755,12 @@ func (s *ListInitContext) AllOptExpr() []IOptExprContext {
}
func (s *ListInitContext) OptExpr(i int) IOptExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IOptExprContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -4304,7 +3790,6 @@ func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognize
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterListInit(s)
@@ -4327,12 +3812,29 @@ func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
func (p *CELParser) ListInit() (localctx IListInitContext) {
+ this := p
+ _ = this
+
localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 20, CELParserRULE_listInit)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.EnterOuterAlt(localctx, 1)
@@ -4341,68 +3843,37 @@ func (p *CELParser) ListInit() (localctx IListInitContext) {
var _x = p.OptExpr()
-
localctx.(*ListInitContext)._optExpr = _x
}
localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr)
p.SetState(195)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext())
+
for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
if _alt == 1 {
{
p.SetState(191)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(192)
var _x = p.OptExpr()
-
localctx.(*ListInitContext)._optExpr = _x
}
localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr)
-
}
p.SetState(197)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext())
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IFieldInitializerListContext is an interface to support dynamic dispatch.
type IFieldInitializerListContext interface {
antlr.ParserRuleContext
@@ -4411,48 +3882,40 @@ type IFieldInitializerListContext interface {
GetParser() antlr.Parser
// GetS21 returns the s21 token.
- GetS21() antlr.Token
-
+ GetS21() antlr.Token
// SetS21 sets the s21 token.
- SetS21(antlr.Token)
-
+ SetS21(antlr.Token)
// GetCols returns the cols token list.
GetCols() []antlr.Token
-
// SetCols sets the cols token list.
SetCols([]antlr.Token)
-
// Get_optField returns the _optField rule contexts.
Get_optField() IOptFieldContext
// Get_expr returns the _expr rule contexts.
Get_expr() IExprContext
-
// Set_optField sets the _optField rule contexts.
Set_optField(IOptFieldContext)
// Set_expr sets the _expr rule contexts.
Set_expr(IExprContext)
-
// GetFields returns the fields rule context list.
GetFields() []IOptFieldContext
// GetValues returns the values rule context list.
GetValues() []IExprContext
-
// SetFields sets the fields rule context list.
- SetFields([]IOptFieldContext)
+ SetFields([]IOptFieldContext)
// SetValues sets the values rule context list.
- SetValues([]IExprContext)
-
+ SetValues([]IExprContext)
// Getter signatures
AllOptField() []IOptFieldContext
@@ -4469,34 +3932,29 @@ type IFieldInitializerListContext interface {
}
type FieldInitializerListContext struct {
- antlr.BaseParserRuleContext
- parser antlr.Parser
- _optField IOptFieldContext
- fields []IOptFieldContext
- s21 antlr.Token
- cols []antlr.Token
- _expr IExprContext
- values []IExprContext
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optField IOptFieldContext
+ fields []IOptFieldContext
+ s21 antlr.Token
+ cols []antlr.Token
+ _expr IExprContext
+ values []IExprContext
}
func NewEmptyFieldInitializerListContext() *FieldInitializerListContext {
var p = new(FieldInitializerListContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_fieldInitializerList
return p
}
-func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_fieldInitializerList
-}
-
func (*FieldInitializerListContext) IsFieldInitializerListContext() {}
func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext {
var p = new(FieldInitializerListContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_fieldInitializerList
@@ -4508,36 +3966,28 @@ func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser
func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 }
-
func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v }
-
func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols }
-
func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v }
-
func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField }
func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr }
-
func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v }
func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v }
-
func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields }
func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values }
-
func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v }
func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v }
-
func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext {
children := s.GetChildren()
len := 0
@@ -4560,12 +4010,12 @@ func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext {
}
func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IOptFieldContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -4609,12 +4059,12 @@ func (s *FieldInitializerListContext) AllExpr() []IExprContext {
}
func (s *FieldInitializerListContext) Expr(i int) IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -4644,7 +4094,6 @@ func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog ant
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterFieldInitializerList(s)
@@ -4667,12 +4116,29 @@ func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) int
}
}
-
-
-
func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) {
+ this := p
+ _ = this
+
localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.EnterOuterAlt(localctx, 1)
@@ -4681,7 +4147,6 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex
var _x = p.OptField()
-
localctx.(*FieldInitializerListContext)._optField = _x
}
localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField)
@@ -4691,10 +4156,6 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex
var _m = p.Match(CELParserCOLON)
localctx.(*FieldInitializerListContext).s21 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21)
{
@@ -4702,35 +4163,24 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex
var _x = p.Expr()
-
localctx.(*FieldInitializerListContext)._expr = _x
}
localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr)
p.SetState(208)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext())
+
for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
if _alt == 1 {
{
p.SetState(201)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(202)
var _x = p.OptField()
-
localctx.(*FieldInitializerListContext)._optField = _x
}
localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField)
@@ -4740,10 +4190,6 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex
var _m = p.Match(CELParserCOLON)
localctx.(*FieldInitializerListContext).s21 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21)
{
@@ -4751,40 +4197,19 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex
var _x = p.Expr()
-
localctx.(*FieldInitializerListContext)._expr = _x
}
localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr)
-
}
p.SetState(210)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext())
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IOptFieldContext is an interface to support dynamic dispatch.
type IOptFieldContext interface {
antlr.ParserRuleContext
@@ -4793,12 +4218,10 @@ type IOptFieldContext interface {
GetParser() antlr.Parser
// GetOpt returns the opt token.
- GetOpt() antlr.Token
-
+ GetOpt() antlr.Token
// SetOpt sets the opt token.
- SetOpt(antlr.Token)
-
+ SetOpt(antlr.Token)
// Getter signatures
IDENTIFIER() antlr.TerminalNode
@@ -4809,29 +4232,24 @@ type IOptFieldContext interface {
}
type OptFieldContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- opt antlr.Token
+ opt antlr.Token
}
func NewEmptyOptFieldContext() *OptFieldContext {
var p = new(OptFieldContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_optField
return p
}
-func InitEmptyOptFieldContext(p *OptFieldContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_optField
-}
-
func (*OptFieldContext) IsOptFieldContext() {}
func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext {
var p = new(OptFieldContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_optField
@@ -4843,10 +4261,8 @@ func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser }
func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt }
-
func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v }
-
func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode {
return s.GetToken(CELParserIDENTIFIER, 0)
}
@@ -4863,7 +4279,6 @@ func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognize
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterOptField(s)
@@ -4886,23 +4301,35 @@ func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
func (p *CELParser) OptField() (localctx IOptFieldContext) {
+ this := p
+ _ = this
+
localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 24, CELParserRULE_optField)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
p.SetState(212)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserQUESTIONMARK {
{
p.SetState(211)
@@ -4910,38 +4337,17 @@ func (p *CELParser) OptField() (localctx IOptFieldContext) {
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*OptFieldContext).opt = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
{
p.SetState(214)
p.Match(CELParserIDENTIFIER)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IMapInitializerListContext is an interface to support dynamic dispatch.
type IMapInitializerListContext interface {
antlr.ParserRuleContext
@@ -4950,48 +4356,40 @@ type IMapInitializerListContext interface {
GetParser() antlr.Parser
// GetS21 returns the s21 token.
- GetS21() antlr.Token
-
+ GetS21() antlr.Token
// SetS21 sets the s21 token.
- SetS21(antlr.Token)
-
+ SetS21(antlr.Token)
// GetCols returns the cols token list.
GetCols() []antlr.Token
-
// SetCols sets the cols token list.
SetCols([]antlr.Token)
-
// Get_optExpr returns the _optExpr rule contexts.
Get_optExpr() IOptExprContext
// Get_expr returns the _expr rule contexts.
Get_expr() IExprContext
-
// Set_optExpr sets the _optExpr rule contexts.
Set_optExpr(IOptExprContext)
// Set_expr sets the _expr rule contexts.
Set_expr(IExprContext)
-
// GetKeys returns the keys rule context list.
GetKeys() []IOptExprContext
// GetValues returns the values rule context list.
GetValues() []IExprContext
-
// SetKeys sets the keys rule context list.
- SetKeys([]IOptExprContext)
+ SetKeys([]IOptExprContext)
// SetValues sets the values rule context list.
- SetValues([]IExprContext)
-
+ SetValues([]IExprContext)
// Getter signatures
AllOptExpr() []IOptExprContext
@@ -5008,34 +4406,29 @@ type IMapInitializerListContext interface {
}
type MapInitializerListContext struct {
- antlr.BaseParserRuleContext
- parser antlr.Parser
- _optExpr IOptExprContext
- keys []IOptExprContext
- s21 antlr.Token
- cols []antlr.Token
- _expr IExprContext
- values []IExprContext
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optExpr IOptExprContext
+ keys []IOptExprContext
+ s21 antlr.Token
+ cols []antlr.Token
+ _expr IExprContext
+ values []IExprContext
}
func NewEmptyMapInitializerListContext() *MapInitializerListContext {
var p = new(MapInitializerListContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_mapInitializerList
return p
}
-func InitEmptyMapInitializerListContext(p *MapInitializerListContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_mapInitializerList
-}
-
func (*MapInitializerListContext) IsMapInitializerListContext() {}
func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext {
var p = new(MapInitializerListContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_mapInitializerList
@@ -5047,36 +4440,28 @@ func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser }
func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 }
-
func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v }
-
func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols }
-
func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v }
-
func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr }
func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr }
-
func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v }
func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v }
-
func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys }
func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values }
-
func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v }
func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v }
-
func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext {
children := s.GetChildren()
len := 0
@@ -5099,12 +4484,12 @@ func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext {
}
func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IOptExprContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -5148,12 +4533,12 @@ func (s *MapInitializerListContext) AllExpr() []IExprContext {
}
func (s *MapInitializerListContext) Expr(i int) IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
if j == i {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
j++
@@ -5183,7 +4568,6 @@ func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterMapInitializerList(s)
@@ -5206,12 +4590,29 @@ func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) inter
}
}
-
-
-
func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
+ this := p
+ _ = this
+
localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
var _alt int
p.EnterOuterAlt(localctx, 1)
@@ -5220,7 +4621,6 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
var _x = p.OptExpr()
-
localctx.(*MapInitializerListContext)._optExpr = _x
}
localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr)
@@ -5230,10 +4630,6 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
var _m = p.Match(CELParserCOLON)
localctx.(*MapInitializerListContext).s21 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21)
{
@@ -5241,35 +4637,24 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
var _x = p.Expr()
-
localctx.(*MapInitializerListContext)._expr = _x
}
localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr)
p.SetState(226)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext())
+
for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
if _alt == 1 {
{
p.SetState(219)
p.Match(CELParserCOMMA)
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
{
p.SetState(220)
var _x = p.OptExpr()
-
localctx.(*MapInitializerListContext)._optExpr = _x
}
localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr)
@@ -5279,10 +4664,6 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
var _m = p.Match(CELParserCOLON)
localctx.(*MapInitializerListContext).s21 = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21)
{
@@ -5290,40 +4671,19 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
var _x = p.Expr()
-
localctx.(*MapInitializerListContext)._expr = _x
}
localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr)
-
}
p.SetState(228)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
- _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext())
- if p.HasError() {
- goto errorExit
- }
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext())
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// IOptExprContext is an interface to support dynamic dispatch.
type IOptExprContext interface {
antlr.ParserRuleContext
@@ -5332,21 +4692,17 @@ type IOptExprContext interface {
GetParser() antlr.Parser
// GetOpt returns the opt token.
- GetOpt() antlr.Token
-
+ GetOpt() antlr.Token
// SetOpt sets the opt token.
- SetOpt(antlr.Token)
-
+ SetOpt(antlr.Token)
// GetE returns the e rule contexts.
GetE() IExprContext
-
// SetE sets the e rule contexts.
SetE(IExprContext)
-
// Getter signatures
Expr() IExprContext
QUESTIONMARK() antlr.TerminalNode
@@ -5356,30 +4712,25 @@ type IOptExprContext interface {
}
type OptExprContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
- opt antlr.Token
- e IExprContext
+ opt antlr.Token
+ e IExprContext
}
func NewEmptyOptExprContext() *OptExprContext {
var p = new(OptExprContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_optExpr
return p
}
-func InitEmptyOptExprContext(p *OptExprContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_optExpr
-}
-
func (*OptExprContext) IsOptExprContext() {}
func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext {
var p = new(OptExprContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_optExpr
@@ -5391,21 +4742,17 @@ func (s *OptExprContext) GetParser() antlr.Parser { return s.parser }
func (s *OptExprContext) GetOpt() antlr.Token { return s.opt }
-
func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v }
-
func (s *OptExprContext) GetE() IExprContext { return s.e }
-
func (s *OptExprContext) SetE(v IExprContext) { s.e = v }
-
func (s *OptExprContext) Expr() IExprContext {
- var t antlr.RuleContext;
+ var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext);
+ t = ctx.(antlr.RuleContext)
break
}
}
@@ -5429,7 +4776,6 @@ func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterOptExpr(s)
@@ -5452,23 +4798,35 @@ func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
-
func (p *CELParser) OptExpr() (localctx IOptExprContext) {
+ this := p
+ _ = this
+
localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 28, CELParserRULE_optExpr)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.EnterOuterAlt(localctx, 1)
p.SetState(230)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserQUESTIONMARK {
{
p.SetState(229)
@@ -5476,10 +4834,6 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) {
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*OptExprContext).opt = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -5488,26 +4842,12 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) {
var _x = p.Expr()
-
localctx.(*OptExprContext).e = _x
}
-
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
// ILiteralContext is an interface to support dynamic dispatch.
type ILiteralContext interface {
antlr.ParserRuleContext
@@ -5519,28 +4859,23 @@ type ILiteralContext interface {
}
type LiteralContext struct {
- antlr.BaseParserRuleContext
+ *antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyLiteralContext() *LiteralContext {
var p = new(LiteralContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = CELParserRULE_literal
return p
}
-func InitEmptyLiteralContext(p *LiteralContext) {
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
- p.RuleIndex = CELParserRULE_literal
-}
-
func (*LiteralContext) IsLiteralContext() {}
func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext {
var p = new(LiteralContext)
- antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = CELParserRULE_literal
@@ -5550,8 +4885,8 @@ func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invo
func (s *LiteralContext) GetParser() antlr.Parser { return s.parser }
-func (s *LiteralContext) CopyAll(ctx *LiteralContext) {
- s.CopyFrom(&ctx.BaseParserRuleContext)
+func (s *LiteralContext) CopyFrom(ctx *LiteralContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
}
func (s *LiteralContext) GetRuleContext() antlr.RuleContext {
@@ -5562,28 +4897,23 @@ func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer
return antlr.TreesStringTree(s, ruleNames, recog)
}
-
-
-
type BytesContext struct {
- LiteralContext
+ *LiteralContext
tok antlr.Token
}
func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext {
var p = new(BytesContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *BytesContext) GetTok() antlr.Token { return s.tok }
-
func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v }
func (s *BytesContext) GetRuleContext() antlr.RuleContext {
@@ -5594,7 +4924,6 @@ func (s *BytesContext) BYTES() antlr.TerminalNode {
return s.GetToken(CELParserBYTES, 0)
}
-
func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterBytes(s)
@@ -5617,26 +4946,23 @@ func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type UintContext struct {
- LiteralContext
+ *LiteralContext
tok antlr.Token
}
func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext {
var p = new(UintContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *UintContext) GetTok() antlr.Token { return s.tok }
-
func (s *UintContext) SetTok(v antlr.Token) { s.tok = v }
func (s *UintContext) GetRuleContext() antlr.RuleContext {
@@ -5647,7 +4973,6 @@ func (s *UintContext) NUM_UINT() antlr.TerminalNode {
return s.GetToken(CELParserNUM_UINT, 0)
}
-
func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterUint(s)
@@ -5670,26 +4995,23 @@ func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type NullContext struct {
- LiteralContext
+ *LiteralContext
tok antlr.Token
}
func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext {
var p = new(NullContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *NullContext) GetTok() antlr.Token { return s.tok }
-
func (s *NullContext) SetTok(v antlr.Token) { s.tok = v }
func (s *NullContext) GetRuleContext() antlr.RuleContext {
@@ -5700,7 +5022,6 @@ func (s *NullContext) NUL() antlr.TerminalNode {
return s.GetToken(CELParserNUL, 0)
}
-
func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterNull(s)
@@ -5723,26 +5044,23 @@ func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type BoolFalseContext struct {
- LiteralContext
+ *LiteralContext
tok antlr.Token
}
func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext {
var p = new(BoolFalseContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok }
-
func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v }
func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext {
@@ -5753,7 +5071,6 @@ func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode {
return s.GetToken(CELParserCEL_FALSE, 0)
}
-
func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterBoolFalse(s)
@@ -5776,26 +5093,23 @@ func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type StringContext struct {
- LiteralContext
+ *LiteralContext
tok antlr.Token
}
func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext {
var p = new(StringContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *StringContext) GetTok() antlr.Token { return s.tok }
-
func (s *StringContext) SetTok(v antlr.Token) { s.tok = v }
func (s *StringContext) GetRuleContext() antlr.RuleContext {
@@ -5806,7 +5120,6 @@ func (s *StringContext) STRING() antlr.TerminalNode {
return s.GetToken(CELParserSTRING, 0)
}
-
func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterString(s)
@@ -5829,29 +5142,26 @@ func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type DoubleContext struct {
- LiteralContext
+ *LiteralContext
sign antlr.Token
- tok antlr.Token
+ tok antlr.Token
}
func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext {
var p = new(DoubleContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *DoubleContext) GetSign() antlr.Token { return s.sign }
func (s *DoubleContext) GetTok() antlr.Token { return s.tok }
-
func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v }
func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v }
@@ -5868,7 +5178,6 @@ func (s *DoubleContext) MINUS() antlr.TerminalNode {
return s.GetToken(CELParserMINUS, 0)
}
-
func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterDouble(s)
@@ -5891,26 +5200,23 @@ func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type BoolTrueContext struct {
- LiteralContext
+ *LiteralContext
tok antlr.Token
}
func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext {
var p = new(BoolTrueContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok }
-
func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v }
func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext {
@@ -5921,7 +5227,6 @@ func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode {
return s.GetToken(CELParserCEL_TRUE, 0)
}
-
func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterBoolTrue(s)
@@ -5944,29 +5249,26 @@ func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
type IntContext struct {
- LiteralContext
+ *LiteralContext
sign antlr.Token
- tok antlr.Token
+ tok antlr.Token
}
func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext {
var p = new(IntContext)
- InitEmptyLiteralContext(&p.LiteralContext)
+ p.LiteralContext = NewEmptyLiteralContext()
p.parser = parser
- p.CopyAll(ctx.(*LiteralContext))
+ p.CopyFrom(ctx.(*LiteralContext))
return p
}
-
func (s *IntContext) GetSign() antlr.Token { return s.sign }
func (s *IntContext) GetTok() antlr.Token { return s.tok }
-
func (s *IntContext) SetSign(v antlr.Token) { s.sign = v }
func (s *IntContext) SetTok(v antlr.Token) { s.tok = v }
@@ -5983,7 +5285,6 @@ func (s *IntContext) MINUS() antlr.TerminalNode {
return s.GetToken(CELParserMINUS, 0)
}
-
func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) {
if listenerT, ok := listener.(CELListener); ok {
listenerT.EnterInt(s)
@@ -6006,31 +5307,40 @@ func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
}
}
-
-
func (p *CELParser) Literal() (localctx ILiteralContext) {
+ this := p
+ _ = this
+
localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 30, CELParserRULE_literal)
var _la int
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
p.SetState(248)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
-
- switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) {
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 34, p.GetParserRuleContext()) {
case 1:
localctx = NewIntContext(p, localctx)
p.EnterOuterAlt(localctx, 1)
p.SetState(235)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserMINUS {
{
p.SetState(234)
@@ -6038,10 +5348,6 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserMINUS)
localctx.(*IntContext).sign = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -6051,13 +5357,8 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserNUM_INT)
localctx.(*IntContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 2:
localctx = NewUintContext(p, localctx)
p.EnterOuterAlt(localctx, 2)
@@ -6067,24 +5368,15 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserNUM_UINT)
localctx.(*UintContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 3:
localctx = NewDoubleContext(p, localctx)
p.EnterOuterAlt(localctx, 3)
p.SetState(240)
p.GetErrorHandler().Sync(p)
- if p.HasError() {
- goto errorExit
- }
_la = p.GetTokenStream().LA(1)
-
if _la == CELParserMINUS {
{
p.SetState(239)
@@ -6092,10 +5384,6 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserMINUS)
localctx.(*DoubleContext).sign = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
}
@@ -6105,13 +5393,8 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserNUM_FLOAT)
localctx.(*DoubleContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 4:
localctx = NewStringContext(p, localctx)
p.EnterOuterAlt(localctx, 4)
@@ -6121,13 +5404,8 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserSTRING)
localctx.(*StringContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 5:
localctx = NewBytesContext(p, localctx)
p.EnterOuterAlt(localctx, 5)
@@ -6137,13 +5415,8 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserBYTES)
localctx.(*BytesContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 6:
localctx = NewBoolTrueContext(p, localctx)
p.EnterOuterAlt(localctx, 6)
@@ -6153,13 +5426,8 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserCEL_TRUE)
localctx.(*BoolTrueContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 7:
localctx = NewBoolFalseContext(p, localctx)
p.EnterOuterAlt(localctx, 7)
@@ -6169,13 +5437,8 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserCEL_FALSE)
localctx.(*BoolFalseContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
-
case 8:
localctx = NewNullContext(p, localctx)
p.EnterOuterAlt(localctx, 8)
@@ -6185,48 +5448,35 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
var _m = p.Match(CELParserNUL)
localctx.(*NullContext).tok = _m
- if p.HasError() {
- // Recognition error - abort rule
- goto errorExit
- }
}
- case antlr.ATNInvalidAltNumber:
- goto errorExit
}
-
-errorExit:
- if p.HasError() {
- v := p.GetError()
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- p.SetError(nil)
- }
- p.ExitRule()
return localctx
- goto errorExit // Trick to prevent compiler error if the label is not used
}
-
func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool {
switch ruleIndex {
case 4:
- var t *RelationContext = nil
- if localctx != nil { t = localctx.(*RelationContext) }
- return p.Relation_Sempred(t, predIndex)
+ var t *RelationContext = nil
+ if localctx != nil {
+ t = localctx.(*RelationContext)
+ }
+ return p.Relation_Sempred(t, predIndex)
case 5:
- var t *CalcContext = nil
- if localctx != nil { t = localctx.(*CalcContext) }
- return p.Calc_Sempred(t, predIndex)
+ var t *CalcContext = nil
+ if localctx != nil {
+ t = localctx.(*CalcContext)
+ }
+ return p.Calc_Sempred(t, predIndex)
case 7:
- var t *MemberContext = nil
- if localctx != nil { t = localctx.(*MemberContext) }
- return p.Member_Sempred(t, predIndex)
-
+ var t *MemberContext = nil
+ if localctx != nil {
+ t = localctx.(*MemberContext)
+ }
+ return p.Member_Sempred(t, predIndex)
default:
panic("No predicate with index: " + fmt.Sprint(ruleIndex))
@@ -6234,9 +5484,12 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int
}
func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ this := p
+ _ = this
+
switch predIndex {
case 0:
- return p.Precpred(p.GetParserRuleContext(), 1)
+ return p.Precpred(p.GetParserRuleContext(), 1)
default:
panic("No predicate with index: " + fmt.Sprint(predIndex))
@@ -6244,12 +5497,15 @@ func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int)
}
func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ this := p
+ _ = this
+
switch predIndex {
case 1:
- return p.Precpred(p.GetParserRuleContext(), 2)
+ return p.Precpred(p.GetParserRuleContext(), 2)
case 2:
- return p.Precpred(p.GetParserRuleContext(), 1)
+ return p.Precpred(p.GetParserRuleContext(), 1)
default:
panic("No predicate with index: " + fmt.Sprint(predIndex))
@@ -6257,18 +5513,20 @@ func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool
}
func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ this := p
+ _ = this
+
switch predIndex {
case 3:
- return p.Precpred(p.GetParserRuleContext(), 3)
+ return p.Precpred(p.GetParserRuleContext(), 3)
case 4:
- return p.Precpred(p.GetParserRuleContext(), 2)
+ return p.Precpred(p.GetParserRuleContext(), 2)
case 5:
- return p.Precpred(p.GetParserRuleContext(), 1)
+ return p.Precpred(p.GetParserRuleContext(), 1)
default:
panic("No predicate with index: " + fmt.Sprint(predIndex))
}
}
-
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
index d2fbd563a..2c54e2cb0 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
@@ -1,8 +1,7 @@
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr4-go/antlr/v4"
-
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// A complete Visitor for a parse tree produced by CELParser.
type CELVisitor interface {
@@ -106,5 +105,4 @@ type CELVisitor interface {
// Visit a parse tree produced by CELParser#Null.
VisitNull(ctx *NullContext) interface{}
-
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/generate.sh b/vendor/github.com/google/cel-go/parser/gen/generate.sh
index 27a9559f7..389107c6a 100644
--- a/vendor/github.com/google/cel-go/parser/gen/generate.sh
+++ b/vendor/github.com/google/cel-go/parser/gen/generate.sh
@@ -27,7 +27,7 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Generate AntLR artifacts.
-java -Xmx500M -cp ${DIR}/antlr-4.13.1-complete.jar org.antlr.v4.Tool \
+java -Xmx500M -cp ${DIR}/antlr-4.12.0-complete.jar org.antlr.v4.Tool \
-Dlanguage=Go \
-package gen \
-o ${DIR} \
diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go
index 182ff034c..a5f29e3d7 100644
--- a/vendor/github.com/google/cel-go/parser/helper.go
+++ b/vendor/github.com/google/cel-go/parser/helper.go
@@ -17,209 +17,284 @@ package parser
import (
"sync"
- antlr "github.com/antlr4-go/antlr/v4"
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
"github.com/google/cel-go/common"
- "github.com/google/cel-go/common/ast"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
type parserHelper struct {
- exprFactory ast.ExprFactory
- source common.Source
- sourceInfo *ast.SourceInfo
- nextID int64
+ source common.Source
+ nextID int64
+ positions map[int64]int32
+ macroCalls map[int64]*exprpb.Expr
}
-func newParserHelper(source common.Source, fac ast.ExprFactory) *parserHelper {
+func newParserHelper(source common.Source) *parserHelper {
return &parserHelper{
- exprFactory: fac,
- source: source,
- sourceInfo: ast.NewSourceInfo(source),
- nextID: 1,
+ source: source,
+ nextID: 1,
+ positions: make(map[int64]int32),
+ macroCalls: make(map[int64]*exprpb.Expr),
}
}
-func (p *parserHelper) getSourceInfo() *ast.SourceInfo {
- return p.sourceInfo
+func (p *parserHelper) getSourceInfo() *exprpb.SourceInfo {
+ return &exprpb.SourceInfo{
+ Location: p.source.Description(),
+ Positions: p.positions,
+ LineOffsets: p.source.LineOffsets(),
+ MacroCalls: p.macroCalls}
}
-func (p *parserHelper) newLiteral(ctx any, value ref.Val) ast.Expr {
- return p.exprFactory.NewLiteral(p.newID(ctx), value)
+func (p *parserHelper) newLiteral(ctx any, value *exprpb.Constant) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: value}
+ return exprNode
}
-func (p *parserHelper) newLiteralBool(ctx any, value bool) ast.Expr {
- return p.newLiteral(ctx, types.Bool(value))
+func (p *parserHelper) newLiteralBool(ctx any, value bool) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: value}})
}
-func (p *parserHelper) newLiteralString(ctx any, value string) ast.Expr {
- return p.newLiteral(ctx, types.String(value))
+func (p *parserHelper) newLiteralString(ctx any, value string) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: value}})
}
-func (p *parserHelper) newLiteralBytes(ctx any, value []byte) ast.Expr {
- return p.newLiteral(ctx, types.Bytes(value))
+func (p *parserHelper) newLiteralBytes(ctx any, value []byte) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: value}})
}
-func (p *parserHelper) newLiteralInt(ctx any, value int64) ast.Expr {
- return p.newLiteral(ctx, types.Int(value))
+func (p *parserHelper) newLiteralInt(ctx any, value int64) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: value}})
}
-func (p *parserHelper) newLiteralUint(ctx any, value uint64) ast.Expr {
- return p.newLiteral(ctx, types.Uint(value))
+func (p *parserHelper) newLiteralUint(ctx any, value uint64) *exprpb.Expr {
+ return p.newLiteral(ctx, &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: value}})
}
-func (p *parserHelper) newLiteralDouble(ctx any, value float64) ast.Expr {
- return p.newLiteral(ctx, types.Double(value))
+func (p *parserHelper) newLiteralDouble(ctx any, value float64) *exprpb.Expr {
+ return p.newLiteral(ctx,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: value}})
}
-func (p *parserHelper) newIdent(ctx any, name string) ast.Expr {
- return p.exprFactory.NewIdent(p.newID(ctx), name)
+func (p *parserHelper) newIdent(ctx any, name string) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}}
+ return exprNode
}
-func (p *parserHelper) newSelect(ctx any, operand ast.Expr, field string) ast.Expr {
- return p.exprFactory.NewSelect(p.newID(ctx), operand, field)
+func (p *parserHelper) newSelect(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field}}
+ return exprNode
}
-func (p *parserHelper) newPresenceTest(ctx any, operand ast.Expr, field string) ast.Expr {
- return p.exprFactory.NewPresenceTest(p.newID(ctx), operand, field)
+func (p *parserHelper) newPresenceTest(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field, TestOnly: true}}
+ return exprNode
}
-func (p *parserHelper) newGlobalCall(ctx any, function string, args ...ast.Expr) ast.Expr {
- return p.exprFactory.NewCall(p.newID(ctx), function, args...)
+func (p *parserHelper) newGlobalCall(ctx any, function string, args ...*exprpb.Expr) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{Function: function, Args: args}}
+ return exprNode
}
-func (p *parserHelper) newReceiverCall(ctx any, function string, target ast.Expr, args ...ast.Expr) ast.Expr {
- return p.exprFactory.NewMemberCall(p.newID(ctx), function, target, args...)
+func (p *parserHelper) newReceiverCall(ctx any, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{Function: function, Target: target, Args: args}}
+ return exprNode
}
-func (p *parserHelper) newList(ctx any, elements []ast.Expr, optionals ...int32) ast.Expr {
- return p.exprFactory.NewList(p.newID(ctx), elements, optionals)
+func (p *parserHelper) newList(ctx any, elements []*exprpb.Expr, optionals ...int32) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elements,
+ OptionalIndices: optionals,
+ }}
+ return exprNode
}
-func (p *parserHelper) newMap(ctx any, entries ...ast.EntryExpr) ast.Expr {
- return p.exprFactory.NewMap(p.newID(ctx), entries)
+func (p *parserHelper) newMap(ctx any, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{Entries: entries}}
+ return exprNode
}
-func (p *parserHelper) newMapEntry(entryID int64, key ast.Expr, value ast.Expr, optional bool) ast.EntryExpr {
- return p.exprFactory.NewMapEntry(entryID, key, value, optional)
+func (p *parserHelper) newMapEntry(entryID int64, key *exprpb.Expr, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: entryID,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{MapKey: key},
+ Value: value,
+ OptionalEntry: optional,
+ }
}
-func (p *parserHelper) newObject(ctx any, typeName string, fields ...ast.EntryExpr) ast.Expr {
- return p.exprFactory.NewStruct(p.newID(ctx), typeName, fields)
+func (p *parserHelper) newObject(ctx any, typeName string, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: typeName,
+ Entries: entries,
+ },
+ }
+ return exprNode
}
-func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Expr, optional bool) ast.EntryExpr {
- return p.exprFactory.NewStructField(fieldID, field, value, optional)
+func (p *parserHelper) newObjectField(fieldID int64, field string, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: fieldID,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{FieldKey: field},
+ Value: value,
+ OptionalEntry: optional,
+ }
}
-func (p *parserHelper) newComprehension(ctx any,
- iterRange ast.Expr,
- iterVar string,
+func (p *parserHelper) newComprehension(ctx any, iterVar string,
+ iterRange *exprpb.Expr,
accuVar string,
- accuInit ast.Expr,
- condition ast.Expr,
- step ast.Expr,
- result ast.Expr) ast.Expr {
- return p.exprFactory.NewComprehension(
- p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result)
-}
-
-func (p *parserHelper) newID(ctx any) int64 {
- if id, isID := ctx.(int64); isID {
- return id
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ exprNode := p.newExpr(ctx)
+ exprNode.ExprKind = &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ AccuVar: accuVar,
+ AccuInit: accuInit,
+ IterVar: iterVar,
+ IterRange: iterRange,
+ LoopCondition: condition,
+ LoopStep: step,
+ Result: result}}
+ return exprNode
+}
+
+func (p *parserHelper) newExpr(ctx any) *exprpb.Expr {
+ id, isID := ctx.(int64)
+ if isID {
+ return &exprpb.Expr{Id: id}
}
- return p.id(ctx)
-}
-
-func (p *parserHelper) newExpr(ctx any) ast.Expr {
- return p.exprFactory.NewUnspecifiedExpr(p.newID(ctx))
+ return &exprpb.Expr{Id: p.id(ctx)}
}
func (p *parserHelper) id(ctx any) int64 {
- var offset ast.OffsetRange
+ var location common.Location
switch c := ctx.(type) {
case antlr.ParserRuleContext:
- start, stop := c.GetStart(), c.GetStop()
- if stop == nil {
- stop = start
- }
- offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn()))
- offset.Stop = p.sourceInfo.ComputeOffset(int32(stop.GetLine()), int32(stop.GetColumn()))
+ token := c.GetStart()
+ location = p.source.NewLocation(token.GetLine(), token.GetColumn())
case antlr.Token:
- offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn()))
- offset.Stop = offset.Start
+ token := c
+ location = p.source.NewLocation(token.GetLine(), token.GetColumn())
case common.Location:
- offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column()))
- offset.Stop = offset.Start
- case ast.OffsetRange:
- offset = c
+ location = c
default:
// This should only happen if the ctx is nil
return -1
}
id := p.nextID
- p.sourceInfo.SetOffsetRange(id, offset)
+ p.positions[id], _ = p.source.LocationOffset(location)
p.nextID++
return id
}
func (p *parserHelper) getLocation(id int64) common.Location {
- return p.sourceInfo.GetStartLocation(id)
+ characterOffset := p.positions[id]
+ location, _ := p.source.OffsetLocation(characterOffset)
+ return location
}
// buildMacroCallArg iterates the expression and returns a new expression
// where all macros have been replaced by their IDs in MacroCalls
-func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr {
- if _, found := p.sourceInfo.GetMacroCall(expr.ID()); found {
- return p.exprFactory.NewUnspecifiedExpr(expr.ID())
+func (p *parserHelper) buildMacroCallArg(expr *exprpb.Expr) *exprpb.Expr {
+ if _, found := p.macroCalls[expr.GetId()]; found {
+ return &exprpb.Expr{Id: expr.GetId()}
}
- switch expr.Kind() {
- case ast.CallKind:
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
// Iterate the AST from `expr` recursively looking for macros. Because we are at most
// starting from the top level macro, this recursion is bounded by the size of the AST. This
// means that the depth check on the AST during parsing will catch recursion overflows
// before we get to here.
- call := expr.AsCall()
- macroArgs := make([]ast.Expr, len(call.Args()))
- for index, arg := range call.Args() {
+ macroTarget := expr.GetCallExpr().GetTarget()
+ if macroTarget != nil {
+ macroTarget = p.buildMacroCallArg(macroTarget)
+ }
+ macroArgs := make([]*exprpb.Expr, len(expr.GetCallExpr().GetArgs()))
+ for index, arg := range expr.GetCallExpr().GetArgs() {
macroArgs[index] = p.buildMacroCallArg(arg)
}
- if !call.IsMemberFunction() {
- return p.exprFactory.NewCall(expr.ID(), call.FunctionName(), macroArgs...)
+ return &exprpb.Expr{
+ Id: expr.GetId(),
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Target: macroTarget,
+ Function: expr.GetCallExpr().GetFunction(),
+ Args: macroArgs,
+ },
+ },
}
- macroTarget := p.buildMacroCallArg(call.Target())
- return p.exprFactory.NewMemberCall(expr.ID(), call.FunctionName(), macroTarget, macroArgs...)
- case ast.ListKind:
- list := expr.AsList()
- macroListArgs := make([]ast.Expr, list.Size())
- for i, elem := range list.Elements() {
+ case *exprpb.Expr_ListExpr:
+ listExpr := expr.GetListExpr()
+ macroListArgs := make([]*exprpb.Expr, len(listExpr.GetElements()))
+ for i, elem := range listExpr.GetElements() {
macroListArgs[i] = p.buildMacroCallArg(elem)
}
- return p.exprFactory.NewList(expr.ID(), macroListArgs, list.OptionalIndices())
+ return &exprpb.Expr{
+ Id: expr.GetId(),
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: macroListArgs,
+ OptionalIndices: listExpr.GetOptionalIndices(),
+ },
+ },
+ }
}
+
return expr
}
// addMacroCall adds the macro the the MacroCalls map in source info. If a macro has args/subargs/target
// that are macros, their ID will be stored instead for later self-lookups.
-func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Expr, args ...ast.Expr) {
- macroArgs := make([]ast.Expr, len(args))
+func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) {
+ macroTarget := target
+ if target != nil {
+ if _, found := p.macroCalls[target.GetId()]; found {
+ macroTarget = &exprpb.Expr{Id: target.GetId()}
+ } else {
+ macroTarget = p.buildMacroCallArg(target)
+ }
+ }
+
+ macroArgs := make([]*exprpb.Expr, len(args))
for index, arg := range args {
macroArgs[index] = p.buildMacroCallArg(arg)
}
- if target == nil {
- p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewCall(0, function, macroArgs...))
- return
- }
- macroTarget := target
- if _, found := p.sourceInfo.GetMacroCall(target.ID()); found {
- macroTarget = p.exprFactory.NewUnspecifiedExpr(target.ID())
- } else {
- macroTarget = p.buildMacroCallArg(target)
+
+ p.macroCalls[exprID] = &exprpb.Expr{
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Target: macroTarget,
+ Function: function,
+ Args: macroArgs,
+ },
+ },
}
- p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewMemberCall(0, function, macroTarget, macroArgs...))
}
// logicManager compacts logical trees into a more efficient structure which is semantically
@@ -234,71 +309,71 @@ func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Ex
// controversial choice as it alters the traditional order of execution assumptions present in most
// expressions.
type logicManager struct {
- exprFactory ast.ExprFactory
+ helper *parserHelper
function string
- terms []ast.Expr
+ terms []*exprpb.Expr
ops []int64
variadicASTs bool
}
// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term.
-func newVariadicLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+func newVariadicLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager {
return &logicManager{
- exprFactory: fac,
+ helper: h,
function: function,
- terms: []ast.Expr{term},
+ terms: []*exprpb.Expr{term},
ops: []int64{},
variadicASTs: true,
}
}
// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term.
-func newBalancingLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+func newBalancingLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager {
return &logicManager{
- exprFactory: fac,
+ helper: h,
function: function,
- terms: []ast.Expr{term},
+ terms: []*exprpb.Expr{term},
ops: []int64{},
variadicASTs: false,
}
}
// addTerm adds an operation identifier and term to the set of terms to be balanced.
-func (l *logicManager) addTerm(op int64, term ast.Expr) {
+func (l *logicManager) addTerm(op int64, term *exprpb.Expr) {
l.terms = append(l.terms, term)
l.ops = append(l.ops, op)
}
// toExpr renders the logic graph into an Expr value, either balancing a tree of logical
// operations or creating a variadic representation of the logical operator.
-func (l *logicManager) toExpr() ast.Expr {
+func (l *logicManager) toExpr() *exprpb.Expr {
if len(l.terms) == 1 {
return l.terms[0]
}
if l.variadicASTs {
- return l.exprFactory.NewCall(l.ops[0], l.function, l.terms...)
+ return l.helper.newGlobalCall(l.ops[0], l.function, l.terms...)
}
return l.balancedTree(0, len(l.ops)-1)
}
// balancedTree recursively balances the terms provided to a commutative operator.
-func (l *logicManager) balancedTree(lo, hi int) ast.Expr {
+func (l *logicManager) balancedTree(lo, hi int) *exprpb.Expr {
mid := (lo + hi + 1) / 2
- var left ast.Expr
+ var left *exprpb.Expr
if mid == lo {
left = l.terms[mid]
} else {
left = l.balancedTree(lo, mid-1)
}
- var right ast.Expr
+ var right *exprpb.Expr
if mid == hi {
right = l.terms[mid+1]
} else {
right = l.balancedTree(mid+1, hi)
}
- return l.exprFactory.NewCall(l.ops[mid], l.function, left, right)
+ return l.helper.newGlobalCall(l.ops[mid], l.function, left, right)
}
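
As an illustration of the balancing above, a minimal standalone sketch of the same midpoint-splitting recursion over plain strings; the balanced helper is illustrative only and not part of cel-go:

package main

import "fmt"

// balanced mirrors the midpoint split performed by balancedTree: each half is
// built recursively, so a chain of N terms becomes a tree of depth O(log N)
// instead of a left-leaning chain of depth N.
func balanced(terms []string, op string) string {
	if len(terms) == 1 {
		return terms[0]
	}
	mid := (len(terms) + 1) / 2
	return "(" + balanced(terms[:mid], op) + " " + op + " " + balanced(terms[mid:], op) + ")"
}

func main() {
	// a && b && c && d parses as a left-deep chain; balancing yields ((a && b) && (c && d)).
	fmt.Println(balanced([]string{"a", "b", "c", "d"}, "&&"))
}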
type exprHelper struct {
@@ -312,151 +387,202 @@ func (e *exprHelper) nextMacroID() int64 {
// Copy implements the ExprHelper interface method by producing a copy of the input Expr value
// with a fresh set of numeric identifiers the Expr and all its descendants.
-func (e *exprHelper) Copy(expr ast.Expr) ast.Expr {
- offsetRange, _ := e.parserHelper.sourceInfo.GetOffsetRange(expr.ID())
- copyID := e.parserHelper.newID(offsetRange)
- switch expr.Kind() {
- case ast.LiteralKind:
- return e.exprFactory.NewLiteral(copyID, expr.AsLiteral())
- case ast.IdentKind:
- return e.exprFactory.NewIdent(copyID, expr.AsIdent())
- case ast.SelectKind:
- sel := expr.AsSelect()
- op := e.Copy(sel.Operand())
- if sel.IsTestOnly() {
- return e.exprFactory.NewPresenceTest(copyID, op, sel.FieldName())
+func (e *exprHelper) Copy(expr *exprpb.Expr) *exprpb.Expr {
+ copy := e.parserHelper.newExpr(e.parserHelper.getLocation(expr.GetId()))
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ copy.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: expr.GetConstExpr()}
+ case *exprpb.Expr_IdentExpr:
+ copy.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: expr.GetIdentExpr()}
+ case *exprpb.Expr_SelectExpr:
+ op := expr.GetSelectExpr().GetOperand()
+ copy.ExprKind = &exprpb.Expr_SelectExpr{SelectExpr: &exprpb.Expr_Select{
+ Operand: e.Copy(op),
+ Field: expr.GetSelectExpr().GetField(),
+ TestOnly: expr.GetSelectExpr().GetTestOnly(),
+ }}
+ case *exprpb.Expr_CallExpr:
+ call := expr.GetCallExpr()
+ target := call.GetTarget()
+ if target != nil {
+ target = e.Copy(target)
}
- return e.exprFactory.NewSelect(copyID, op, sel.FieldName())
- case ast.CallKind:
- call := expr.AsCall()
- args := call.Args()
- argsCopy := make([]ast.Expr, len(args))
+ args := call.GetArgs()
+ argsCopy := make([]*exprpb.Expr, len(args))
for i, arg := range args {
argsCopy[i] = e.Copy(arg)
}
- if !call.IsMemberFunction() {
- return e.exprFactory.NewCall(copyID, call.FunctionName(), argsCopy...)
+ copy.ExprKind = &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: call.GetFunction(),
+ Target: target,
+ Args: argsCopy,
+ },
}
- return e.exprFactory.NewMemberCall(copyID, call.FunctionName(), e.Copy(call.Target()), argsCopy...)
- case ast.ListKind:
- list := expr.AsList()
- elems := list.Elements()
- elemsCopy := make([]ast.Expr, len(elems))
+ case *exprpb.Expr_ListExpr:
+ elems := expr.GetListExpr().GetElements()
+ elemsCopy := make([]*exprpb.Expr, len(elems))
for i, elem := range elems {
elemsCopy[i] = e.Copy(elem)
}
- return e.exprFactory.NewList(copyID, elemsCopy, list.OptionalIndices())
- case ast.MapKind:
- m := expr.AsMap()
- entries := m.Entries()
- entriesCopy := make([]ast.EntryExpr, len(entries))
- for i, en := range entries {
- entry := en.AsMapEntry()
- entryID := e.nextMacroID()
- entriesCopy[i] = e.exprFactory.NewMapEntry(entryID,
- e.Copy(entry.Key()), e.Copy(entry.Value()), entry.IsOptional())
+ copy.ExprKind = &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{Elements: elemsCopy},
+ }
+ case *exprpb.Expr_StructExpr:
+ entries := expr.GetStructExpr().GetEntries()
+ entriesCopy := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
+ for i, entry := range entries {
+ entryCopy := &exprpb.Expr_CreateStruct_Entry{}
+ entryCopy.Id = e.nextMacroID()
+ switch entry.GetKeyKind().(type) {
+ case *exprpb.Expr_CreateStruct_Entry_FieldKey:
+ entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: entry.GetFieldKey(),
+ }
+ case *exprpb.Expr_CreateStruct_Entry_MapKey:
+ entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: e.Copy(entry.GetMapKey()),
+ }
+ }
+ entryCopy.Value = e.Copy(entry.GetValue())
+ entriesCopy[i] = entryCopy
}
- return e.exprFactory.NewMap(copyID, entriesCopy)
- case ast.StructKind:
- s := expr.AsStruct()
- fields := s.Fields()
- fieldsCopy := make([]ast.EntryExpr, len(fields))
- for i, f := range fields {
- field := f.AsStructField()
- fieldID := e.nextMacroID()
- fieldsCopy[i] = e.exprFactory.NewStructField(fieldID,
- field.Name(), e.Copy(field.Value()), field.IsOptional())
+ copy.ExprKind = &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: expr.GetStructExpr().GetMessageName(),
+ Entries: entriesCopy,
+ },
+ }
+ case *exprpb.Expr_ComprehensionExpr:
+ iterRange := e.Copy(expr.GetComprehensionExpr().GetIterRange())
+ accuInit := e.Copy(expr.GetComprehensionExpr().GetAccuInit())
+ cond := e.Copy(expr.GetComprehensionExpr().GetLoopCondition())
+ step := e.Copy(expr.GetComprehensionExpr().GetLoopStep())
+ result := e.Copy(expr.GetComprehensionExpr().GetResult())
+ copy.ExprKind = &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterRange: iterRange,
+ IterVar: expr.GetComprehensionExpr().GetIterVar(),
+ AccuInit: accuInit,
+ AccuVar: expr.GetComprehensionExpr().GetAccuVar(),
+ LoopCondition: cond,
+ LoopStep: step,
+ Result: result,
+ },
}
- return e.exprFactory.NewStruct(copyID, s.TypeName(), fieldsCopy)
- case ast.ComprehensionKind:
- compre := expr.AsComprehension()
- iterRange := e.Copy(compre.IterRange())
- accuInit := e.Copy(compre.AccuInit())
- cond := e.Copy(compre.LoopCondition())
- step := e.Copy(compre.LoopStep())
- result := e.Copy(compre.Result())
- return e.exprFactory.NewComprehension(copyID,
- iterRange, compre.IterVar(), compre.AccuVar(), accuInit, cond, step, result)
}
- return e.exprFactory.NewUnspecifiedExpr(copyID)
+ return copy
+}
+
+// LiteralBool implements the ExprHelper interface method.
+func (e *exprHelper) LiteralBool(value bool) *exprpb.Expr {
+ return e.parserHelper.newLiteralBool(e.nextMacroID(), value)
+}
+
+// LiteralBytes implements the ExprHelper interface method.
+func (e *exprHelper) LiteralBytes(value []byte) *exprpb.Expr {
+ return e.parserHelper.newLiteralBytes(e.nextMacroID(), value)
+}
+
+// LiteralDouble implements the ExprHelper interface method.
+func (e *exprHelper) LiteralDouble(value float64) *exprpb.Expr {
+ return e.parserHelper.newLiteralDouble(e.nextMacroID(), value)
+}
+
+// LiteralInt implements the ExprHelper interface method.
+func (e *exprHelper) LiteralInt(value int64) *exprpb.Expr {
+ return e.parserHelper.newLiteralInt(e.nextMacroID(), value)
}
-// NewLiteral implements the ExprHelper interface method.
-func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr {
- return e.exprFactory.NewLiteral(e.nextMacroID(), value)
+// LiteralString implements the ExprHelper interface method.
+func (e *exprHelper) LiteralString(value string) *exprpb.Expr {
+ return e.parserHelper.newLiteralString(e.nextMacroID(), value)
+}
+
+// LiteralUint implements the ExprHelper interface method.
+func (e *exprHelper) LiteralUint(value uint64) *exprpb.Expr {
+ return e.parserHelper.newLiteralUint(e.nextMacroID(), value)
}
// NewList implements the ExprHelper interface method.
-func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr {
- return e.exprFactory.NewList(e.nextMacroID(), elems, []int32{})
+func (e *exprHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newList(e.nextMacroID(), elems)
}
// NewMap implements the ExprHelper interface method.
-func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr {
- return e.exprFactory.NewMap(e.nextMacroID(), entries)
+func (e *exprHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ return e.parserHelper.newMap(e.nextMacroID(), entries...)
}
// NewMapEntry implements the ExprHelper interface method.
-func (e *exprHelper) NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr {
- return e.exprFactory.NewMapEntry(e.nextMacroID(), key, val, optional)
+func (e *exprHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return e.parserHelper.newMapEntry(e.nextMacroID(), key, val, optional)
}
-// NewStruct implements the ExprHelper interface method.
-func (e *exprHelper) NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr {
- return e.exprFactory.NewStruct(e.nextMacroID(), typeName, fieldInits)
+// NewObject implements the ExprHelper interface method.
+func (e *exprHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ return e.parserHelper.newObject(e.nextMacroID(), typeName, fieldInits...)
}
-// NewStructField implements the ExprHelper interface method.
-func (e *exprHelper) NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr {
- return e.exprFactory.NewStructField(e.nextMacroID(), field, init, optional)
+// NewObjectFieldInit implements the ExprHelper interface method.
+func (e *exprHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return e.parserHelper.newObjectField(e.nextMacroID(), field, init, optional)
}
-// NewComprehension implements the ExprHelper interface method.
-func (e *exprHelper) NewComprehension(
- iterRange ast.Expr,
- iterVar string,
+// Fold implements the ExprHelper interface method.
+func (e *exprHelper) Fold(iterVar string,
+ iterRange *exprpb.Expr,
accuVar string,
- accuInit ast.Expr,
- condition ast.Expr,
- step ast.Expr,
- result ast.Expr) ast.Expr {
- return e.exprFactory.NewComprehension(
- e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result)
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newComprehension(
+ e.nextMacroID(), iterVar, iterRange, accuVar, accuInit, condition, step, result)
}
-// NewIdent implements the ExprHelper interface method.
-func (e *exprHelper) NewIdent(name string) ast.Expr {
- return e.exprFactory.NewIdent(e.nextMacroID(), name)
+// Ident implements the ExprHelper interface method.
+func (e *exprHelper) Ident(name string) *exprpb.Expr {
+ return e.parserHelper.newIdent(e.nextMacroID(), name)
}
-// NewAccuIdent implements the ExprHelper interface method.
-func (e *exprHelper) NewAccuIdent() ast.Expr {
- return e.exprFactory.NewAccuIdent(e.nextMacroID())
+// AccuIdent implements the ExprHelper interface method.
+func (e *exprHelper) AccuIdent() *exprpb.Expr {
+ return e.parserHelper.newIdent(e.nextMacroID(), AccumulatorName)
}
-// NewGlobalCall implements the ExprHelper interface method.
-func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr {
- return e.exprFactory.NewCall(e.nextMacroID(), function, args...)
+// GlobalCall implements the ExprHelper interface method.
+func (e *exprHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newGlobalCall(e.nextMacroID(), function, args...)
}
-// NewMemberCall implements the ExprHelper interface method.
-func (e *exprHelper) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
- return e.exprFactory.NewMemberCall(e.nextMacroID(), function, target, args...)
+// ReceiverCall implements the ExprHelper interface method.
+func (e *exprHelper) ReceiverCall(function string,
+ target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ return e.parserHelper.newReceiverCall(e.nextMacroID(), function, target, args...)
}
-// NewPresenceTest implements the ExprHelper interface method.
-func (e *exprHelper) NewPresenceTest(operand ast.Expr, field string) ast.Expr {
- return e.exprFactory.NewPresenceTest(e.nextMacroID(), operand, field)
+// PresenceTest implements the ExprHelper interface method.
+func (e *exprHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+ return e.parserHelper.newPresenceTest(e.nextMacroID(), operand, field)
}
-// NewSelect implements the ExprHelper interface method.
-func (e *exprHelper) NewSelect(operand ast.Expr, field string) ast.Expr {
- return e.exprFactory.NewSelect(e.nextMacroID(), operand, field)
+// Select implements the ExprHelper interface method.
+func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+ return e.parserHelper.newSelect(e.nextMacroID(), operand, field)
}
// OffsetLocation implements the ExprHelper interface method.
func (e *exprHelper) OffsetLocation(exprID int64) common.Location {
- return e.parserHelper.sourceInfo.GetStartLocation(exprID)
+ offset, found := e.parserHelper.positions[exprID]
+ if !found {
+ return common.NoLocation
+ }
+ location, found := e.parserHelper.source.OffsetLocation(offset)
+ if !found {
+ return common.NoLocation
+ }
+ return location
}
// NewError associates an error message with a given expression id, populating the source offset location of the error if possible.
diff --git a/vendor/github.com/google/cel-go/parser/input.go b/vendor/github.com/google/cel-go/parser/input.go
index 44792455d..810eaff21 100644
--- a/vendor/github.com/google/cel-go/parser/input.go
+++ b/vendor/github.com/google/cel-go/parser/input.go
@@ -15,7 +15,7 @@
package parser
import (
- antlr "github.com/antlr4-go/antlr/v4"
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
"github.com/google/cel-go/common/runes"
)
@@ -110,7 +110,7 @@ func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string {
}
// GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval.
-func (c *charStream) GetTextFromInterval(i antlr.Interval) string {
+func (c *charStream) GetTextFromInterval(i *antlr.Interval) string {
return c.GetText(i.Start, i.Stop)
}
diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go
index 5b1775bed..6066e8ef4 100644
--- a/vendor/github.com/google/cel-go/parser/macro.go
+++ b/vendor/github.com/google/cel-go/parser/macro.go
@@ -18,10 +18,9 @@ import (
"fmt"
"github.com/google/cel-go/common"
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// NewGlobalMacro creates a Macro for a global function with the specified arg count.
@@ -143,38 +142,58 @@ func makeVarArgMacroKey(name string, receiverStyle bool) string {
// and produces as output an Expr ast node.
//
// Note: when the Macro.IsReceiverStyle() method returns false, the target argument will be nil.
-type MacroExpander func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error)
+type MacroExpander func(eh ExprHelper,
+ target *exprpb.Expr,
+ args []*exprpb.Expr) (*exprpb.Expr, *common.Error)
-// ExprHelper assists with the creation of Expr values in a manner which is consistent
-// the internal semantics and id generation behaviors of the parser and checker libraries.
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
type ExprHelper interface {
// Copy the input expression with a brand new set of identifiers.
- Copy(ast.Expr) ast.Expr
+ Copy(*exprpb.Expr) *exprpb.Expr
+
+ // LiteralBool creates an Expr value for a bool literal.
+ LiteralBool(value bool) *exprpb.Expr
+
+ // LiteralBytes creates an Expr value for a byte literal.
+ LiteralBytes(value []byte) *exprpb.Expr
+
+ // LiteralDouble creates an Expr value for double literal.
+ LiteralDouble(value float64) *exprpb.Expr
- // Literal creates an Expr value for a scalar literal value.
- NewLiteral(value ref.Val) ast.Expr
+ // LiteralInt creates an Expr value for an int literal.
+ LiteralInt(value int64) *exprpb.Expr
- // NewList creates a list literal instruction with an optional set of elements.
- NewList(elems ...ast.Expr) ast.Expr
+ // LiteralString creates an Expr value for a string literal.
+ LiteralString(value string) *exprpb.Expr
+
+ // LiteralUint creates an Expr value for a uint literal.
+ LiteralUint(value uint64) *exprpb.Expr
+
+ // NewList creates a CreateList instruction where the list is comprised of the optional set
+ // of elements provided as arguments.
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr
// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
// optional set of key, value entries.
- NewMap(entries ...ast.EntryExpr) ast.Expr
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
// NewMapEntry creates a Map Entry for the key, value pair.
- NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr
+ NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
- // NewStruct creates a struct literal expression with an optional set of field initializers.
- NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr
+ // NewObject creates a CreateStruct instruction for an object with a given type name and
+ // optional set of field initializers.
+ NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
- // NewStructField creates a new struct field initializer from the field name and value.
- NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr
+ // NewObjectFieldInit creates a new Object field initializer from the field name and value.
+ NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
- // NewComprehension creates a new comprehension instruction.
+ // Fold creates a fold comprehension instruction.
//
+ // - iterVar is the iteration variable name.
// - iterRange represents the expression that resolves to a list or map where the elements or
// keys (respectively) will be iterated over.
- // - iterVar is the iteration variable name.
// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
// - accuInit is the initial expression whose value will be set for the accuVar prior to
// folding.
@@ -185,31 +204,31 @@ type ExprHelper interface {
// The accuVar should not shadow variable names that you would like to reference within the
// environment in the step and condition expressions. Presently, the name __result__ is commonly
// used by built-in macros but this may change in the future.
- NewComprehension(iterRange ast.Expr,
- iterVar string,
+ Fold(iterVar string,
+ iterRange *exprpb.Expr,
accuVar string,
- accuInit ast.Expr,
- condition ast.Expr,
- step ast.Expr,
- result ast.Expr) ast.Expr
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr
- // NewIdent creates an identifier Expr value.
- NewIdent(name string) ast.Expr
+ // Ident creates an identifier Expr value.
+ Ident(name string) *exprpb.Expr
- // NewAccuIdent returns an accumulator identifier for use with comprehension results.
- NewAccuIdent() ast.Expr
+ // AccuIdent returns an accumulator identifier for use with comprehension results.
+ AccuIdent() *exprpb.Expr
- // NewCall creates a function call Expr value for a global (free) function.
- NewCall(function string, args ...ast.Expr) ast.Expr
+ // GlobalCall creates a function call Expr value for a global (free) function.
+ GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
- // NewMemberCall creates a function call Expr value for a receiver-style function.
- NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr
+ // ReceiverCall creates a function call Expr value for a receiver-style function.
+ ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
- // NewPresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
- NewPresenceTest(operand ast.Expr, field string) ast.Expr
+ // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
- // NewSelect create a field traversal Expr value.
- NewSelect(operand ast.Expr, field string) ast.Expr
+ // Select creates a field traversal Expr value.
+ Select(operand *exprpb.Expr, field string) *exprpb.Expr
// OffsetLocation returns the Location of the expression identifier.
OffsetLocation(exprID int64) common.Location
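
To show how the proto-based MacroExpander and ExprHelper fit together, a hedged sketch of a custom global macro; the macro name double and its rewrite into x + x are hypothetical, not part of the library:

package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/operators"
	"github.com/google/cel-go/parser"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// doubleMacro rewrites double(x) into x + x using the helper methods above.
// Copy gives the second occurrence of the argument a fresh set of expression IDs.
var doubleMacro = parser.NewGlobalMacro("double", 1,
	func(eh parser.ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
		return eh.GlobalCall(operators.Add, args[0], eh.Copy(args[0])), nil
	})

func main() {
	p, err := parser.NewParser(parser.Macros(doubleMacro))
	if err != nil {
		panic(err)
	}
	parsed, _ := p.Parse(common.NewTextSource(`double(7)`))
	fmt.Println(parsed.GetExpr().GetCallExpr().GetFunction()) // _+_
}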
@@ -277,21 +296,21 @@ const (
// MakeAll expands the input call arguments into a comprehension that returns true if all of the
// elements in the range match the predicate expressions:
// <iterRange>.all(<iterVar>, <predicate>)
-func MakeAll(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+func MakeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
return makeQuantifier(quantifierAll, eh, target, args)
}
// MakeExists expands the input call arguments into a comprehension that returns true if any of the
// elements in the range match the predicate expressions:
// <iterRange>.exists(<iterVar>, <predicate>)
-func MakeExists(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+func MakeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
return makeQuantifier(quantifierExists, eh, target, args)
}
// MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly
// one of the elements in the range match the predicate expressions:
// <iterRange>.exists_one(<iterVar>, <predicate>)
-func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
return makeQuantifier(quantifierExistsOne, eh, target, args)
}
@@ -305,14 +324,14 @@ func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *
//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
-func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
v, found := extractIdent(args[0])
if !found {
- return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+ return nil, eh.NewError(args[0].GetId(), "argument is not an identifier")
}
- var fn ast.Expr
- var filter ast.Expr
+ var fn *exprpb.Expr
+ var filter *exprpb.Expr
if len(args) == 3 {
filter = args[1]
@@ -322,85 +341,84 @@ func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common
fn = args[1]
}
- accuExpr := eh.NewAccuIdent()
+ accuExpr := eh.Ident(AccumulatorName)
init := eh.NewList()
- condition := eh.NewLiteral(types.True)
- step := eh.NewCall(operators.Add, accuExpr, eh.NewList(fn))
+ condition := eh.LiteralBool(true)
+ step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn))
if filter != nil {
- step = eh.NewCall(operators.Conditional, filter, step, accuExpr)
+ step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
}
- return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, accuExpr), nil
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
}
// MakeFilter expands the input call arguments into a comprehension which produces a list which contains
// only elements which match the provided predicate expression:
// <iterRange>.filter(<iterVar>, <predicate>)
-func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
v, found := extractIdent(args[0])
if !found {
- return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+ return nil, eh.NewError(args[0].GetId(), "argument is not an identifier")
}
filter := args[1]
- accuExpr := eh.NewAccuIdent()
+ accuExpr := eh.Ident(AccumulatorName)
init := eh.NewList()
- condition := eh.NewLiteral(types.True)
- step := eh.NewCall(operators.Add, accuExpr, eh.NewList(args[0]))
- step = eh.NewCall(operators.Conditional, filter, step, accuExpr)
- return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, accuExpr), nil
+ condition := eh.LiteralBool(true)
+ step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0]))
+ step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
}
// MakeHas expands the input call arguments into a presence test, e.g. has(<operand>.field)
-func MakeHas(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
- if args[0].Kind() == ast.SelectKind {
- s := args[0].AsSelect()
- return eh.NewPresenceTest(s.Operand(), s.FieldName()), nil
+func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok {
+ return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil
}
- return nil, eh.NewError(args[0].ID(), "invalid argument to has() macro")
+ return nil, eh.NewError(args[0].GetId(), "invalid argument to has() macro")
}
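
A small usage sketch of the presence-test expansion above, using the package-level Parse restored later in this patch; the expression text is arbitrary:

package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	// has(msg.field) is rewritten into a test-only select rather than a call.
	parsed, _ := parser.Parse(common.NewTextSource(`has(msg.field)`))
	sel := parsed.GetExpr().GetSelectExpr()
	fmt.Println(sel.GetField(), sel.GetTestOnly()) // field true
}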
-func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
v, found := extractIdent(args[0])
if !found {
- return nil, eh.NewError(args[0].ID(), "argument must be a simple name")
+ return nil, eh.NewError(args[0].GetId(), "argument must be a simple name")
}
- var init ast.Expr
- var condition ast.Expr
- var step ast.Expr
- var result ast.Expr
+ var init *exprpb.Expr
+ var condition *exprpb.Expr
+ var step *exprpb.Expr
+ var result *exprpb.Expr
switch kind {
case quantifierAll:
- init = eh.NewLiteral(types.True)
- condition = eh.NewCall(operators.NotStrictlyFalse, eh.NewAccuIdent())
- step = eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), args[1])
- result = eh.NewAccuIdent()
+ init = eh.LiteralBool(true)
+ condition = eh.GlobalCall(operators.NotStrictlyFalse, eh.AccuIdent())
+ step = eh.GlobalCall(operators.LogicalAnd, eh.AccuIdent(), args[1])
+ result = eh.AccuIdent()
case quantifierExists:
- init = eh.NewLiteral(types.False)
- condition = eh.NewCall(
+ init = eh.LiteralBool(false)
+ condition = eh.GlobalCall(
operators.NotStrictlyFalse,
- eh.NewCall(operators.LogicalNot, eh.NewAccuIdent()))
- step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1])
- result = eh.NewAccuIdent()
+ eh.GlobalCall(operators.LogicalNot, eh.AccuIdent()))
+ step = eh.GlobalCall(operators.LogicalOr, eh.AccuIdent(), args[1])
+ result = eh.AccuIdent()
case quantifierExistsOne:
- zeroExpr := eh.NewLiteral(types.Int(0))
- oneExpr := eh.NewLiteral(types.Int(1))
+ zeroExpr := eh.LiteralInt(0)
+ oneExpr := eh.LiteralInt(1)
init = zeroExpr
- condition = eh.NewLiteral(types.True)
- step = eh.NewCall(operators.Conditional, args[1],
- eh.NewCall(operators.Add, eh.NewAccuIdent(), oneExpr), eh.NewAccuIdent())
- result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), oneExpr)
+ condition = eh.LiteralBool(true)
+ step = eh.GlobalCall(operators.Conditional, args[1],
+ eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent())
+ result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr)
default:
- return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
+ return nil, eh.NewError(args[0].GetId(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
}
- return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil
}
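
A hedged sketch of the quantifier expansion as well: all() becomes a fold over the range whose accumulator is the parser's AccumulatorName (__result__); the example expression is arbitrary:

package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	parsed, _ := parser.Parse(common.NewTextSource(`[1, 2, 3].all(x, x > 0)`))
	// The macro expands into a comprehension; x is the iteration variable and
	// __result__ is the accumulator built by AccuIdent.
	compre := parsed.GetExpr().GetComprehensionExpr()
	fmt.Println(compre.GetIterVar(), compre.GetAccuVar())
}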
-func extractIdent(e ast.Expr) (string, bool) {
- switch e.Kind() {
- case ast.IdentKind:
- return e.AsIdent(), true
+func extractIdent(e *exprpb.Expr) (string, bool) {
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_IdentExpr:
+ return e.GetIdentExpr().GetName(), true
}
return "", false
}
diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go
index cb753df73..109326a93 100644
--- a/vendor/github.com/google/cel-go/parser/parser.go
+++ b/vendor/github.com/google/cel-go/parser/parser.go
@@ -21,15 +21,17 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
- antlr "github.com/antlr4-go/antlr/v4"
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
"github.com/google/cel-go/common"
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/runes"
- "github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser/gen"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
)
// Parser encapsulates the context necessary to perform parsing for different expressions.
@@ -86,13 +88,11 @@ func mustNewParser(opts ...Option) *Parser {
}
// Parse parses the expression represented by source and returns the result.
-func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) {
+func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
errs := common.NewErrors(source)
- fac := ast.NewExprFactory()
impl := parser{
errors: &parseErrors{errs},
- exprFactory: fac,
- helper: newParserHelper(source, fac),
+ helper: newParserHelper(source),
macros: p.macros,
maxRecursionDepth: p.maxRecursionDepth,
errorReportingLimit: p.errorReportingLimit,
@@ -106,15 +106,18 @@ func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) {
if !ok {
buf = runes.NewBuffer(source.Content())
}
- var out ast.Expr
+ var e *exprpb.Expr
if buf.Len() > p.expressionSizeCodePointLimit {
- out = impl.reportError(common.NoLocation,
+ e = impl.reportError(common.NoLocation,
"expression code point size exceeds limit: size: %d, limit %d",
buf.Len(), p.expressionSizeCodePointLimit)
} else {
- out = impl.parse(buf, source.Description())
+ e = impl.parse(buf, source.Description())
}
- return ast.NewAST(out, impl.helper.getSourceInfo()), errs
+ return &exprpb.ParsedExpr{
+ Expr: e,
+ SourceInfo: impl.helper.getSourceInfo(),
+ }, errs
}
// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid
@@ -147,7 +150,7 @@ var reservedIds = map[string]struct{}{
// This function calls ParseWithMacros with AllMacros.
//
// Deprecated: Use NewParser().Parse() instead.
-func Parse(source common.Source) (*ast.AST, *common.Errors) {
+func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
return mustNewParser(Macros(AllMacros...)).Parse(source)
}
@@ -284,7 +287,6 @@ var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{}
type parser struct {
gen.BaseCELVisitor
errors *parseErrors
- exprFactory ast.ExprFactory
helper *parserHelper
macros map[string]Macro
recursionDepth int
@@ -298,21 +300,53 @@ type parser struct {
enableVariadicOperatorASTs bool
}
-var _ gen.CELVisitor = (*parser)(nil)
+var (
+ _ gen.CELVisitor = (*parser)(nil)
-func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr {
- lexer := gen.NewCELLexer(newCharStream(expr, desc))
- lexer.RemoveErrorListeners()
- lexer.AddErrorListener(p)
+ lexerPool *sync.Pool = &sync.Pool{
+ New: func() any {
+ l := gen.NewCELLexer(nil)
+ l.RemoveErrorListeners()
+ return l
+ },
+ }
+
+ parserPool *sync.Pool = &sync.Pool{
+ New: func() any {
+ p := gen.NewCELParser(nil)
+ p.RemoveErrorListeners()
+ return p
+ },
+ }
+)
- prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0))
- prsr.RemoveErrorListeners()
+func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
+ // TODO: get rid of these pools once https://github.com/antlr/antlr4/pull/3571 is in a release
+ lexer := lexerPool.Get().(*gen.CELLexer)
+ prsr := parserPool.Get().(*gen.CELParser)
prsrListener := &recursionListener{
maxDepth: p.maxRecursionDepth,
ruleTypeDepth: map[int]*int{},
}
+ defer func() {
+ // Unfortunately the ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners,
+ // so this is good enough until that is exported.
+ // Reset the lexer and parser before putting them back in the pool.
+ lexer.RemoveErrorListeners()
+ prsr.RemoveParseListener(prsrListener)
+ prsr.RemoveErrorListeners()
+ lexer.SetInputStream(nil)
+ prsr.SetInputStream(nil)
+ lexerPool.Put(lexer)
+ parserPool.Put(prsr)
+ }()
+
+ lexer.SetInputStream(newCharStream(expr, desc))
+ prsr.SetInputStream(antlr.NewCommonTokenStream(lexer, 0))
+
+ lexer.AddErrorListener(p)
prsr.AddErrorListener(p)
prsr.AddParseListener(prsrListener)
@@ -339,7 +373,7 @@ func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr {
}
}()
- return p.Visit(prsr.Start_()).(ast.Expr)
+ return p.Visit(prsr.Start()).(*exprpb.Expr)
}
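
The pooling above follows the usual reset-before-Put discipline for sync.Pool; a minimal standalone sketch of the same pattern with a bytes.Buffer, illustrative only:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool mirrors the lexer/parser pools above: objects are reset before being
// returned so the next Get starts from a clean state.
var bufPool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

func render(msg string) string {
	buf := bufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset() // drop state from this use before pooling the buffer again
		bufPool.Put(buf)
	}()
	buf.WriteString("expr: ")
	buf.WriteString(msg)
	return buf.String()
}

func main() {
	fmt.Println(render("a && b"))
}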
// Visitor implementations.
@@ -436,26 +470,26 @@ func (p *parser) VisitStart(ctx *gen.StartContext) any {
// Visit a parse tree produced by CELParser#expr.
func (p *parser) VisitExpr(ctx *gen.ExprContext) any {
- result := p.Visit(ctx.GetE()).(ast.Expr)
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
if ctx.GetOp() == nil {
return result
}
opID := p.helper.id(ctx.GetOp())
- ifTrue := p.Visit(ctx.GetE1()).(ast.Expr)
- ifFalse := p.Visit(ctx.GetE2()).(ast.Expr)
+ ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr)
+ ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr)
return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse)
}
// Visit a parse tree produced by CELParser#conditionalOr.
func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
- result := p.Visit(ctx.GetE()).(ast.Expr)
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
l := p.newLogicManager(operators.LogicalOr, result)
rest := ctx.GetE1()
for i, op := range ctx.GetOps() {
if i >= len(rest) {
return p.reportError(ctx, "unexpected character, wanted '||'")
}
- next := p.Visit(rest[i]).(ast.Expr)
+ next := p.Visit(rest[i]).(*exprpb.Expr)
opID := p.helper.id(op)
l.addTerm(opID, next)
}
@@ -464,14 +498,14 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
// Visit a parse tree produced by CELParser#conditionalAnd.
func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any {
- result := p.Visit(ctx.GetE()).(ast.Expr)
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
l := p.newLogicManager(operators.LogicalAnd, result)
rest := ctx.GetE1()
for i, op := range ctx.GetOps() {
if i >= len(rest) {
return p.reportError(ctx, "unexpected character, wanted '&&'")
}
- next := p.Visit(rest[i]).(ast.Expr)
+ next := p.Visit(rest[i]).(*exprpb.Expr)
opID := p.helper.id(op)
l.addTerm(opID, next)
}
@@ -485,9 +519,9 @@ func (p *parser) VisitRelation(ctx *gen.RelationContext) any {
opText = ctx.GetOp().GetText()
}
if op, found := operators.Find(opText); found {
- lhs := p.Visit(ctx.Relation(0)).(ast.Expr)
+ lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr)
opID := p.helper.id(ctx.GetOp())
- rhs := p.Visit(ctx.Relation(1)).(ast.Expr)
+ rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr)
return p.globalCallOrMacro(opID, op, lhs, rhs)
}
return p.reportError(ctx, "operator not found")
@@ -500,9 +534,9 @@ func (p *parser) VisitCalc(ctx *gen.CalcContext) any {
opText = ctx.GetOp().GetText()
}
if op, found := operators.Find(opText); found {
- lhs := p.Visit(ctx.Calc(0)).(ast.Expr)
+ lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr)
opID := p.helper.id(ctx.GetOp())
- rhs := p.Visit(ctx.Calc(1)).(ast.Expr)
+ rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr)
return p.globalCallOrMacro(opID, op, lhs, rhs)
}
return p.reportError(ctx, "operator not found")
@@ -518,7 +552,7 @@ func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any {
return p.Visit(ctx.Member())
}
opID := p.helper.id(ctx.GetOps()[0])
- target := p.Visit(ctx.Member()).(ast.Expr)
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
return p.globalCallOrMacro(opID, operators.LogicalNot, target)
}
@@ -527,13 +561,13 @@ func (p *parser) VisitNegate(ctx *gen.NegateContext) any {
return p.Visit(ctx.Member())
}
opID := p.helper.id(ctx.GetOps()[0])
- target := p.Visit(ctx.Member()).(ast.Expr)
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
return p.globalCallOrMacro(opID, operators.Negate, target)
}
// VisitSelect visits a parse tree produced by CELParser#Select.
func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
- operand := p.Visit(ctx.Member()).(ast.Expr)
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
// Handle the error case where no valid identifier is specified.
if ctx.GetId() == nil || ctx.GetOp() == nil {
return p.helper.newExpr(ctx)
@@ -554,7 +588,7 @@ func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
// VisitMemberCall visits a parse tree produced by CELParser#MemberCall.
func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
- operand := p.Visit(ctx.Member()).(ast.Expr)
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
// Handle the error case where no valid identifier is specified.
if ctx.GetId() == nil {
return p.helper.newExpr(ctx)
@@ -566,13 +600,13 @@ func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
// Visit a parse tree produced by CELParser#Index.
func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
- target := p.Visit(ctx.Member()).(ast.Expr)
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
// Handle the error case where no valid identifier is specified.
if ctx.GetOp() == nil {
return p.helper.newExpr(ctx)
}
opID := p.helper.id(ctx.GetOp())
- index := p.Visit(ctx.GetIndex()).(ast.Expr)
+ index := p.Visit(ctx.GetIndex()).(*exprpb.Expr)
operator := operators.Index
if ctx.GetOpt() != nil {
if !p.enableOptionalSyntax {
@@ -596,7 +630,7 @@ func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
messageName = "." + messageName
}
objID := p.helper.id(ctx.GetOp())
- entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]ast.EntryExpr)
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
return p.helper.newObject(objID, messageName, entries...)
}
@@ -604,16 +638,16 @@ func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
if ctx == nil || ctx.GetFields() == nil {
// This is the result of a syntax error handled elsewhere, return empty.
- return []ast.EntryExpr{}
+ return []*exprpb.Expr_CreateStruct_Entry{}
}
- result := make([]ast.EntryExpr, len(ctx.GetFields()))
+ result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields()))
cols := ctx.GetCols()
vals := ctx.GetValues()
for i, f := range ctx.GetFields() {
if i >= len(cols) || i >= len(vals) {
// This is the result of a syntax error detected elsewhere.
- return []ast.EntryExpr{}
+ return []*exprpb.Expr_CreateStruct_Entry{}
}
initID := p.helper.id(cols[i])
optField := f.(*gen.OptFieldContext)
@@ -625,10 +659,10 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext
// The field may be empty due to a prior error.
id := optField.IDENTIFIER()
if id == nil {
- return []ast.EntryExpr{}
+ return []*exprpb.Expr_CreateStruct_Entry{}
}
fieldName := id.GetText()
- value := p.Visit(vals[i]).(ast.Expr)
+ value := p.Visit(vals[i]).(*exprpb.Expr)
field := p.helper.newObjectField(initID, fieldName, value, optional)
result[i] = field
}
@@ -668,9 +702,9 @@ func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
// Visit a parse tree produced by CELParser#CreateStruct.
func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
structID := p.helper.id(ctx.GetOp())
- entries := []ast.EntryExpr{}
+ entries := []*exprpb.Expr_CreateStruct_Entry{}
if ctx.GetEntries() != nil {
- entries = p.Visit(ctx.GetEntries()).([]ast.EntryExpr)
+ entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
}
return p.helper.newMap(structID, entries...)
}
@@ -679,17 +713,17 @@ func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
if ctx == nil || ctx.GetKeys() == nil {
// This is the result of a syntax error handled elsewhere, return empty.
- return []ast.EntryExpr{}
+ return []*exprpb.Expr_CreateStruct_Entry{}
}
- result := make([]ast.EntryExpr, len(ctx.GetCols()))
+ result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols()))
keys := ctx.GetKeys()
vals := ctx.GetValues()
for i, col := range ctx.GetCols() {
colID := p.helper.id(col)
if i >= len(keys) || i >= len(vals) {
// This is the result of a syntax error detected elsewhere.
- return []ast.EntryExpr{}
+ return []*exprpb.Expr_CreateStruct_Entry{}
}
optKey := keys[i]
optional := optKey.GetOpt() != nil
@@ -697,8 +731,8 @@ func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any
p.reportError(optKey, "unsupported syntax '?'")
continue
}
- key := p.Visit(optKey.GetE()).(ast.Expr)
- value := p.Visit(vals[i]).(ast.Expr)
+ key := p.Visit(optKey.GetE()).(*exprpb.Expr)
+ value := p.Visit(vals[i]).(*exprpb.Expr)
entry := p.helper.newMapEntry(colID, key, value, optional)
result[i] = entry
}
@@ -778,27 +812,30 @@ func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
// Visit a parse tree produced by CELParser#Null.
func (p *parser) VisitNull(ctx *gen.NullContext) any {
- return p.helper.exprFactory.NewLiteral(p.helper.newID(ctx), types.NullValue)
+ return p.helper.newLiteral(ctx,
+ &exprpb.Constant{
+ ConstantKind: &exprpb.Constant_NullValue{
+ NullValue: structpb.NullValue_NULL_VALUE}})
}
-func (p *parser) visitExprList(ctx gen.IExprListContext) []ast.Expr {
+func (p *parser) visitExprList(ctx gen.IExprListContext) []*exprpb.Expr {
if ctx == nil {
- return []ast.Expr{}
+ return []*exprpb.Expr{}
}
return p.visitSlice(ctx.GetE())
}
-func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) {
+func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int32) {
if ctx == nil {
- return []ast.Expr{}, []int32{}
+ return []*exprpb.Expr{}, []int32{}
}
elements := ctx.GetElems()
- result := make([]ast.Expr, len(elements))
+ result := make([]*exprpb.Expr, len(elements))
optionals := []int32{}
for i, e := range elements {
- ex := p.Visit(e.GetE()).(ast.Expr)
+ ex := p.Visit(e.GetE()).(*exprpb.Expr)
if ex == nil {
- return []ast.Expr{}, []int32{}
+ return []*exprpb.Expr{}, []int32{}
}
result[i] = ex
if e.GetOpt() != nil {
@@ -812,13 +849,13 @@ func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) {
return result, optionals
}
-func (p *parser) visitSlice(expressions []gen.IExprContext) []ast.Expr {
+func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
if expressions == nil {
- return []ast.Expr{}
+ return []*exprpb.Expr{}
}
- result := make([]ast.Expr, len(expressions))
+ result := make([]*exprpb.Expr, len(expressions))
for i, e := range expressions {
- ex := p.Visit(e).(ast.Expr)
+ ex := p.Visit(e).(*exprpb.Expr)
result[i] = ex
}
return result
@@ -833,24 +870,24 @@ func (p *parser) unquote(ctx any, value string, isBytes bool) string {
return text
}
-func (p *parser) newLogicManager(function string, term ast.Expr) *logicManager {
+func (p *parser) newLogicManager(function string, term *exprpb.Expr) *logicManager {
if p.enableVariadicOperatorASTs {
- return newVariadicLogicManager(p.exprFactory, function, term)
+ return newVariadicLogicManager(p.helper, function, term)
}
- return newBalancingLogicManager(p.exprFactory, function, term)
+ return newBalancingLogicManager(p.helper, function, term)
}
-func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr {
+func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr {
var location common.Location
err := p.helper.newExpr(ctx)
switch c := ctx.(type) {
case common.Location:
location = c
case antlr.Token, antlr.ParserRuleContext:
- location = p.helper.getLocation(err.ID())
+ location = p.helper.getLocation(err.GetId())
}
// Provide arguments to the report error.
- p.errors.reportErrorAtID(err.ID(), location, format, args...)
+ p.errors.reportErrorAtID(err.GetId(), location, format, args...)
return err
}
@@ -875,33 +912,33 @@ func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, l
}
}
-func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs *antlr.ATNConfigSet) {
+func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
// Intentional
}
-func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs *antlr.ATNConfigSet) {
+func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
// Intentional
}
-func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs *antlr.ATNConfigSet) {
+func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) {
// Intentional
}
-func (p *parser) globalCallOrMacro(exprID int64, function string, args ...ast.Expr) ast.Expr {
+func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr {
if expr, found := p.expandMacro(exprID, function, nil, args...); found {
return expr
}
return p.helper.newGlobalCall(exprID, function, args...)
}
-func (p *parser) receiverCallOrMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
if expr, found := p.expandMacro(exprID, function, target, args...); found {
return expr
}
return p.helper.newReceiverCall(exprID, function, target, args...)
}
-func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) (ast.Expr, bool) {
+func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) {
macro, found := p.macros[makeMacroKey(function, len(args), target != nil)]
if !found {
macro, found = p.macros[makeVarArgMacroKey(function, target != nil)]
@@ -927,7 +964,7 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg
return nil, false
}
if p.populateMacroCalls {
- p.helper.addMacroCall(expr.ID(), function, target, args...)
+ p.helper.addMacroCall(expr.GetId(), function, target, args...)
}
return expr, true
}
diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go
index 91cf72944..c3c40a0dd 100644
--- a/vendor/github.com/google/cel-go/parser/unparser.go
+++ b/vendor/github.com/google/cel-go/parser/unparser.go
@@ -20,9 +20,9 @@ import (
"strconv"
"strings"
- "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Unparse takes an input expression and source position information and generates a human-readable
@@ -39,7 +39,7 @@ import (
//
// This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as
// performing word wrapping on expressions.
-func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (string, error) {
+func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) (string, error) {
unparserOpts := &unparserOption{
wrapOnColumn: defaultWrapOnColumn,
wrapAfterColumnLimit: defaultWrapAfterColumnLimit,
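
A hedged round-trip sketch of the proto-based Unparse signature above; the expression text is arbitrary:

package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	parsed, _ := parser.Parse(common.NewTextSource(`a && b || c == "hi"`))
	// Unparse reconstructs a human-readable form from the Expr and SourceInfo.
	out, err := parser.Unparse(parsed.GetExpr(), parsed.GetSourceInfo())
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}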
@@ -68,12 +68,12 @@ func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (strin
// unparser visits an expression to reconstruct a human-readable string from an AST.
type unparser struct {
str strings.Builder
- info *ast.SourceInfo
+ info *exprpb.SourceInfo
options *unparserOption
lastWrappedIndex int
}
-func (un *unparser) visit(expr ast.Expr) error {
+func (un *unparser) visit(expr *exprpb.Expr) error {
if expr == nil {
return errors.New("unsupported expression")
}
@@ -81,29 +81,27 @@ func (un *unparser) visit(expr ast.Expr) error {
if visited || err != nil {
return err
}
- switch expr.Kind() {
- case ast.CallKind:
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
return un.visitCall(expr)
- case ast.LiteralKind:
+ case *exprpb.Expr_ConstExpr:
return un.visitConst(expr)
- case ast.IdentKind:
+ case *exprpb.Expr_IdentExpr:
return un.visitIdent(expr)
- case ast.ListKind:
+ case *exprpb.Expr_ListExpr:
return un.visitList(expr)
- case ast.MapKind:
- return un.visitStructMap(expr)
- case ast.SelectKind:
+ case *exprpb.Expr_SelectExpr:
return un.visitSelect(expr)
- case ast.StructKind:
- return un.visitStructMsg(expr)
+ case *exprpb.Expr_StructExpr:
+ return un.visitStruct(expr)
default:
return fmt.Errorf("unsupported expression: %v", expr)
}
}
-func (un *unparser) visitCall(expr ast.Expr) error {
- c := expr.AsCall()
- fun := c.FunctionName()
+func (un *unparser) visitCall(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
switch fun {
// ternary operator
case operators.Conditional:
@@ -143,10 +141,10 @@ func (un *unparser) visitCall(expr ast.Expr) error {
}
}
-func (un *unparser) visitCallBinary(expr ast.Expr) error {
- c := expr.AsCall()
- fun := c.FunctionName()
- args := c.Args()
+func (un *unparser) visitCallBinary(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ args := c.GetArgs()
lhs := args[0]
// add parens if the current operator is lower precedence than the lhs expr operator.
lhsParen := isComplexOperatorWithRespectTo(fun, lhs)
@@ -170,9 +168,9 @@ func (un *unparser) visitCallBinary(expr ast.Expr) error {
return un.visitMaybeNested(rhs, rhsParen)
}
-func (un *unparser) visitCallConditional(expr ast.Expr) error {
- c := expr.AsCall()
- args := c.Args()
+func (un *unparser) visitCallConditional(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
// add parens if operand is a conditional itself.
nested := isSamePrecedence(operators.Conditional, args[0]) ||
isComplexOperator(args[0])
@@ -198,13 +196,13 @@ func (un *unparser) visitCallConditional(expr ast.Expr) error {
return un.visitMaybeNested(args[2], nested)
}
-func (un *unparser) visitCallFunc(expr ast.Expr) error {
- c := expr.AsCall()
- fun := c.FunctionName()
- args := c.Args()
- if c.IsMemberFunction() {
- nested := isBinaryOrTernaryOperator(c.Target())
- err := un.visitMaybeNested(c.Target(), nested)
+func (un *unparser) visitCallFunc(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ args := c.GetArgs()
+ if c.GetTarget() != nil {
+ nested := isBinaryOrTernaryOperator(c.GetTarget())
+ err := un.visitMaybeNested(c.GetTarget(), nested)
if err != nil {
return err
}
@@ -225,17 +223,17 @@ func (un *unparser) visitCallFunc(expr ast.Expr) error {
return nil
}
-func (un *unparser) visitCallIndex(expr ast.Expr) error {
+func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
return un.visitCallIndexInternal(expr, "[")
}
-func (un *unparser) visitCallOptIndex(expr ast.Expr) error {
+func (un *unparser) visitCallOptIndex(expr *exprpb.Expr) error {
return un.visitCallIndexInternal(expr, "[?")
}
-func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error {
- c := expr.AsCall()
- args := c.Args()
+func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
nested := isBinaryOrTernaryOperator(args[0])
err := un.visitMaybeNested(args[0], nested)
if err != nil {
@@ -250,10 +248,10 @@ func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error {
return nil
}
-func (un *unparser) visitCallUnary(expr ast.Expr) error {
- c := expr.AsCall()
- fun := c.FunctionName()
- args := c.Args()
+func (un *unparser) visitCallUnary(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ fun := c.GetFunction()
+ args := c.GetArgs()
unmangled, found := operators.FindReverse(fun)
if !found {
return fmt.Errorf("cannot unmangle operator: %s", fun)
@@ -263,34 +261,35 @@ func (un *unparser) visitCallUnary(expr ast.Expr) error {
return un.visitMaybeNested(args[0], nested)
}
-func (un *unparser) visitConst(expr ast.Expr) error {
- val := expr.AsLiteral()
- switch val := val.(type) {
- case types.Bool:
- un.str.WriteString(strconv.FormatBool(bool(val)))
- case types.Bytes:
+func (un *unparser) visitConst(expr *exprpb.Expr) error {
+ c := expr.GetConstExpr()
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_BoolValue:
+ un.str.WriteString(strconv.FormatBool(c.GetBoolValue()))
+ case *exprpb.Constant_BytesValue:
// bytes constants are surrounded with b""
+ b := c.GetBytesValue()
un.str.WriteString(`b"`)
- un.str.WriteString(bytesToOctets([]byte(val)))
+ un.str.WriteString(bytesToOctets(b))
un.str.WriteString(`"`)
- case types.Double:
+ case *exprpb.Constant_DoubleValue:
// represent the float using the minimum required digits
- d := strconv.FormatFloat(float64(val), 'g', -1, 64)
+ d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64)
un.str.WriteString(d)
if !strings.Contains(d, ".") {
un.str.WriteString(".0")
}
- case types.Int:
- i := strconv.FormatInt(int64(val), 10)
+ case *exprpb.Constant_Int64Value:
+ i := strconv.FormatInt(c.GetInt64Value(), 10)
un.str.WriteString(i)
- case types.Null:
+ case *exprpb.Constant_NullValue:
un.str.WriteString("null")
- case types.String:
+ case *exprpb.Constant_StringValue:
// strings will be double quoted with quotes escaped.
- un.str.WriteString(strconv.Quote(string(val)))
- case types.Uint:
+ un.str.WriteString(strconv.Quote(c.GetStringValue()))
+ case *exprpb.Constant_Uint64Value:
// uint literals have a 'u' suffix.
- ui := strconv.FormatUint(uint64(val), 10)
+ ui := strconv.FormatUint(c.GetUint64Value(), 10)
un.str.WriteString(ui)
un.str.WriteString("u")
default:
@@ -299,16 +298,16 @@ func (un *unparser) visitConst(expr ast.Expr) error {
return nil
}
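
The constant-rendering rules above can be checked in isolation; a small illustrative sketch of the double, uint, and string cases:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Doubles use the minimum required digits but always keep a decimal point.
	d := strconv.FormatFloat(2.0, 'g', -1, 64)
	if !strings.Contains(d, ".") {
		d += ".0"
	}
	fmt.Println(d) // 2.0

	// Uint literals carry a trailing 'u'; strings are quoted with escapes.
	fmt.Println(strconv.FormatUint(42, 10) + "u") // 42u
	fmt.Println(strconv.Quote(`say "hi"`))        // "say \"hi\""
}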
-func (un *unparser) visitIdent(expr ast.Expr) error {
- un.str.WriteString(expr.AsIdent())
+func (un *unparser) visitIdent(expr *exprpb.Expr) error {
+ un.str.WriteString(expr.GetIdentExpr().GetName())
return nil
}
-func (un *unparser) visitList(expr ast.Expr) error {
- l := expr.AsList()
- elems := l.Elements()
+func (un *unparser) visitList(expr *exprpb.Expr) error {
+ l := expr.GetListExpr()
+ elems := l.GetElements()
optIndices := make(map[int]bool, len(elems))
- for _, idx := range l.OptionalIndices() {
+ for _, idx := range l.GetOptionalIndices() {
optIndices[int(idx)] = true
}
un.str.WriteString("[")
@@ -328,20 +327,20 @@ func (un *unparser) visitList(expr ast.Expr) error {
return nil
}
-func (un *unparser) visitOptSelect(expr ast.Expr) error {
- c := expr.AsCall()
- args := c.Args()
+func (un *unparser) visitOptSelect(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
operand := args[0]
- field := args[1].AsLiteral().(types.String)
- return un.visitSelectInternal(operand, false, ".?", string(field))
+ field := args[1].GetConstExpr().GetStringValue()
+ return un.visitSelectInternal(operand, false, ".?", field)
}
-func (un *unparser) visitSelect(expr ast.Expr) error {
- sel := expr.AsSelect()
- return un.visitSelectInternal(sel.Operand(), sel.IsTestOnly(), ".", sel.FieldName())
+func (un *unparser) visitSelect(expr *exprpb.Expr) error {
+ sel := expr.GetSelectExpr()
+ return un.visitSelectInternal(sel.GetOperand(), sel.GetTestOnly(), ".", sel.GetField())
}
-func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op string, field string) error {
+func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op string, field string) error {
// handle the case when the select expression was generated by the has() macro.
if testOnly {
un.str.WriteString("has(")
@@ -359,25 +358,34 @@ func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op stri
return nil
}
-func (un *unparser) visitStructMsg(expr ast.Expr) error {
- m := expr.AsStruct()
- fields := m.Fields()
- un.str.WriteString(m.TypeName())
+func (un *unparser) visitStruct(expr *exprpb.Expr) error {
+ s := expr.GetStructExpr()
+ // If the message name is non-empty, then this should be treated as message construction.
+ if s.GetMessageName() != "" {
+ return un.visitStructMsg(expr)
+ }
+ // Otherwise, build a map.
+ return un.visitStructMap(expr)
+}
+
+func (un *unparser) visitStructMsg(expr *exprpb.Expr) error {
+ m := expr.GetStructExpr()
+ entries := m.GetEntries()
+ un.str.WriteString(m.GetMessageName())
un.str.WriteString("{")
- for i, f := range fields {
- field := f.AsStructField()
- f := field.Name()
- if field.IsOptional() {
+ for i, entry := range entries {
+ f := entry.GetFieldKey()
+ if entry.GetOptionalEntry() {
un.str.WriteString("?")
}
un.str.WriteString(f)
un.str.WriteString(": ")
- v := field.Value()
+ v := entry.GetValue()
err := un.visit(v)
if err != nil {
return err
}
- if i < len(fields)-1 {
+ if i < len(entries)-1 {
un.str.WriteString(", ")
}
}
@@ -385,14 +393,13 @@ func (un *unparser) visitStructMsg(expr ast.Expr) error {
return nil
}
-func (un *unparser) visitStructMap(expr ast.Expr) error {
- m := expr.AsMap()
- entries := m.Entries()
+func (un *unparser) visitStructMap(expr *exprpb.Expr) error {
+ m := expr.GetStructExpr()
+ entries := m.GetEntries()
un.str.WriteString("{")
- for i, e := range entries {
- entry := e.AsMapEntry()
- k := entry.Key()
- if entry.IsOptional() {
+ for i, entry := range entries {
+ k := entry.GetMapKey()
+ if entry.GetOptionalEntry() {
un.str.WriteString("?")
}
err := un.visit(k)
@@ -400,7 +407,7 @@ func (un *unparser) visitStructMap(expr ast.Expr) error {
return err
}
un.str.WriteString(": ")
- v := entry.Value()
+ v := entry.GetValue()
err = un.visit(v)
if err != nil {
return err
@@ -413,15 +420,16 @@ func (un *unparser) visitStructMap(expr ast.Expr) error {
return nil
}
-func (un *unparser) visitMaybeMacroCall(expr ast.Expr) (bool, error) {
- call, found := un.info.GetMacroCall(expr.ID())
+func (un *unparser) visitMaybeMacroCall(expr *exprpb.Expr) (bool, error) {
+ macroCalls := un.info.GetMacroCalls()
+ call, found := macroCalls[expr.GetId()]
if !found {
return false, nil
}
return true, un.visit(call)
}
-func (un *unparser) visitMaybeNested(expr ast.Expr, nested bool) error {
+func (un *unparser) visitMaybeNested(expr *exprpb.Expr, nested bool) error {
if nested {
un.str.WriteString("(")
}
@@ -445,12 +453,12 @@ func isLeftRecursive(op string) bool {
// precedence of the (possible) operation represented in the input Expr.
//
// If the expr is not a Call, the result is false.
-func isSamePrecedence(op string, expr ast.Expr) bool {
- if expr.Kind() != ast.CallKind {
+func isSamePrecedence(op string, expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() == nil {
return false
}
- c := expr.AsCall()
- other := c.FunctionName()
+ c := expr.GetCallExpr()
+ other := c.GetFunction()
return operators.Precedence(op) == operators.Precedence(other)
}
@@ -458,16 +466,16 @@ func isSamePrecedence(op string, expr ast.Expr) bool {
// than the (possible) operation represented in the input Expr.
//
// If the expr is not a Call, the result is false.
-func isLowerPrecedence(op string, expr ast.Expr) bool {
- c := expr.AsCall()
- other := c.FunctionName()
+func isLowerPrecedence(op string, expr *exprpb.Expr) bool {
+ c := expr.GetCallExpr()
+ other := c.GetFunction()
return operators.Precedence(op) < operators.Precedence(other)
}
// Indicates whether the expr is a complex operator, i.e., a call expression
// with 2 or more arguments.
-func isComplexOperator(expr ast.Expr) bool {
- if expr.Kind() == ast.CallKind && len(expr.AsCall().Args()) >= 2 {
+func isComplexOperator(expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() != nil && len(expr.GetCallExpr().GetArgs()) >= 2 {
return true
}
return false
@@ -476,19 +484,19 @@ func isComplexOperator(expr ast.Expr) bool {
// Indicates whether it is a complex operation compared to another.
// expr is *not* considered complex if it is not a call expression or has
// less than two arguments, or if it has a higher precedence than op.
-func isComplexOperatorWithRespectTo(op string, expr ast.Expr) bool {
- if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 {
+func isComplexOperatorWithRespectTo(op string, expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 {
return false
}
return isLowerPrecedence(op, expr)
}
// Indicate whether this is a binary or ternary operator.
-func isBinaryOrTernaryOperator(expr ast.Expr) bool {
- if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 {
+func isBinaryOrTernaryOperator(expr *exprpb.Expr) bool {
+ if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 {
return false
}
- _, isBinaryOp := operators.FindReverseBinaryOperator(expr.AsCall().FunctionName())
+ _, isBinaryOp := operators.FindReverseBinaryOperator(expr.GetCallExpr().GetFunction())
return isBinaryOp || isSamePrecedence(operators.Conditional, expr)
}
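
The hunks above restore the protobuf-backed unparser from cel-go v0.17.7, where an expression is a *exprpb.Expr walked through GetCallExpr, GetConstExpr, GetSelectExpr, and related accessors instead of the ast.Expr interface used by v0.18.x. A minimal sketch of how that unparser is reached through cel-go's public API follows; the environment option and the sample expression are illustrative assumptions, while cel.NewEnv, env.Parse, and cel.AstToString are the v0.17.x entry points that hand the parsed AST to parser.Unparse and, in turn, to the visit functions patched above.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Illustrative environment for the sketch: a single int variable "x".
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}

	// Parse (without type-checking) a small expression that exercises a
	// binary operator, a ternary, and bytes literals.
	ast, iss := env.Parse(`x + 1 >= 2 ? b"ok" : b"no"`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}

	// AstToString unparses the AST back to CEL source, driving the
	// visitCall/visitConst/visitSelect logic shown in the hunks above.
	out, err := cel.AstToString(ast)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // e.g. x + 1 >= 2 ? b"ok" : b"no"
}
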
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9099018a9..48dc0bb7a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -5,9 +5,9 @@ github.com/Azure/go-ansiterm/winterm
# github.com/NYTimes/gziphandler v1.1.1
## explicit; go 1.11
github.com/NYTimes/gziphandler
-# github.com/antlr4-go/antlr/v4 v4.13.0
-## explicit; go 1.20
-github.com/antlr4-go/antlr/v4
+# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df
+## explicit; go 1.18
+github.com/antlr/antlr4/runtime/Go/antlr/v4
# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
## explicit; go 1.13
github.com/asaskevich/govalidator
@@ -91,7 +91,7 @@ github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/cel-go v0.18.2
+# github.com/google/cel-go v0.17.7
## explicit; go 1.18
github.com/google/cel-go/cel
github.com/google/cel-go/checker