diff --git a/.gitignore b/.gitignore index 2e1973f..27d9474 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ tags /local /spiff /spiff++ +/hack +/godoc *.coverprofile spiff_darwin_amd64.zip spiff_linux_amd64.zip diff --git a/.travis.yml b/.travis.yml index 0192043..88b5531 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.11.x + - 1.13.x install: - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh diff --git a/Gopkg.lock b/Gopkg.lock index c90aaad..1dc0881 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,6 +2,7 @@ [[projects]] + branch = "master" digest = "1:c12310d3e2b5a97b89dba30d1d006e00b564e04ec4f2d4b5d1631e97e86a8b42" name = "github.com/cloudfoundry-incubator/candiedyaml" packages = ["."] @@ -9,12 +10,12 @@ revision = "a41693b7b7afb422c7ecb1028458ab27da047bbb" [[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" + digest = "1:80057945464ffb5b0da1f026beb8df0e8dbd098eaf771a349291bed2cd29a83e" name = "github.com/fsnotify/fsnotify" packages = ["."] pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" + revision = "45d7d09e39ef4ac08d493309fa031790c15bfe8a" + version = "v1.4.9" [[projects]] digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10" @@ -35,20 +36,6 @@ revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" version = "v1.0.0" -[[projects]] - digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813" - name = "github.com/hpcloud/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile", - ] - pruneopts = "UT" - revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" - version = "v1.0.0" - [[projects]] digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" name = "github.com/inconshreveable/mousetrap" @@ -58,23 +45,58 @@ version = "v1.0" [[projects]] - digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7" + digest = "1:fae336644ff950d61b17ed30a0e82d710ab56e792321048f31afc82a3aa8375a" name = "github.com/magiconair/properties" packages = ["."] pruneopts = "UT" - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" + revision = "e55ec40311b90df900426eae806bb80a40154583" + version = "v1.8.3" [[projects]] - digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + branch = "master" + digest = "1:79d4c7eac6cf9f9fd5c17cdc6536f28fc9532798f61ac1efa3a57152c379f221" + name = "github.com/mandelsoft/filepath" + packages = ["pkg/filepath"] + pruneopts = "UT" + revision = "3df73d378d552504d3df40fd7b6d8b00c12b4427" + +[[projects]] + digest = "1:e3bff19a88b38e751206f3f28599a9e3d23617255d4747c5fc9ea43691b0bb37" + name = "github.com/mandelsoft/vfs" + packages = [ + "pkg/osfs", + "pkg/projectionfs", + "pkg/utils", + "pkg/vfs", + ] + pruneopts = "UT" + revision = "d03d33d5889a612bddefccdd689094cc517b374a" + version = "v0.1" + +[[projects]] + digest = "1:aff0e9185b5df855488a42e064cb479e994636d3cefa47d053495123d086add4" name = "github.com/mitchellh/mapstructure" packages = ["."] pruneopts = "UT" - revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" - version = "v1.1.2" + revision = "9e1e4717f8567d7ead72d070d064ad17d444a67e" + version = "v1.3.3" [[projects]] - digest = "1:5f4b78246f0bcb105b1e3b2b9e22b52a57cd02f57a8078572fe27c62f4a75ff7" + digest = "1:a9f00de9b605c251f5f1eba5d34c238131f0a7d38f6e7126c6bacc073f314865" + name = "github.com/nxadm/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + 
"winfile", + ] + pruneopts = "UT" + revision = "327c577245448d8192115e77a76ea3d6aee88202" + version = "v1.4.4" + +[[projects]] + digest = "1:e6097a089184d8efef123f5c66dc543069f2273806ba552e6d214294cfb7615a" name = "github.com/onsi/ginkgo" packages = [ ".", @@ -82,6 +104,7 @@ "internal/codelocation", "internal/containernode", "internal/failer", + "internal/global", "internal/leafnodes", "internal/remote", "internal/spec", @@ -97,11 +120,11 @@ "types", ] pruneopts = "UT" - revision = "2e1be8f7d90e9d3e3e58b0ce470f2f14d075406f" - version = "v1.7.0" + revision = "6d83527acb3f6cda405ce47a475d480117098381" + version = "v1.14.1" [[projects]] - digest = "1:b4764603c54d74435f246901248aefb2b9d430bb7b160afde1afc41d89d48f1a" + digest = "1:a74054ea234c0ccf5e30e9ac9a438e6db25db2828f2ac636481d21bc3cfcf0b1" name = "github.com/onsi/gomega" packages = [ ".", @@ -120,90 +143,99 @@ "types", ] pruneopts = "UT" - revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" - version = "v1.4.3" + revision = "d7eb503b14592ef8cefacd3c9589398c7decaf23" + version = "v1.10.2" [[projects]] - digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" + digest = "1:ba7c8395a3bdd3b44d40ba36ff6eb283b583a3db865c02a952b89a1aa639081b" name = "github.com/pelletier/go-toml" packages = ["."] pruneopts = "UT" - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" + revision = "65ca8064882c8c308e5c804c5d5443d409e0738c" + version = "v1.8.1" [[projects]] - digest = "1:3e39bafd6c2f4bf3c76c3bfd16a2e09e016510ad5db90dc02b88e2f565d6d595" + digest = "1:5bc51628f10ee2e983badf7e302531fd87db15badfb675389450b8765cb3149f" name = "github.com/spf13/afero" packages = [ ".", "mem", ] pruneopts = "UT" - revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5" - version = "v1.2.1" + revision = "a4ea980f2d023f22c7eede4c142190115449de5b" + version = "v1.4.0" [[projects]] - digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" + digest = "1:ff4cd55a3666b6ea3a876c9e133bfb54d6c812e725409a773f2c94a0b3a92f4f" name = "github.com/spf13/cast" packages = ["."] pruneopts = "UT" - revision = "8c9545af88b134710ab1cd196795e7f2388358d7" - version = "v1.3.0" + revision = "1ffadf551085444af981432dd0f6d1160c11ec64" + version = "v1.3.1" [[projects]] - digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" + digest = "1:1e80255eea46af2e2db7da90f95e4b5cfe84f3b58d9a8598ec35b0f2a75c60e4" name = "github.com/spf13/cobra" packages = ["."] pruneopts = "UT" - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" + revision = "6607e6b8603f56adb027298ee6695e06ffb3a819" + version = "0.0.7" [[projects]] - digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb" + digest = "1:1b753ec16506f5864d26a28b43703c58831255059644351bbcb019b843950900" name = "github.com/spf13/jwalterweatherman" packages = ["."] pruneopts = "UT" - revision = "4a4406e478ca629068e7768fc33f3f044173c0a6" - version = "v1.0.0" + revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1" + version = "v1.1.0" [[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + digest = "1:524b71991fc7d9246cc7dc2d9e0886ccb97648091c63e30eef619e6862c955dd" name = "github.com/spf13/pflag" packages = ["."] pruneopts = "UT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" + revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab" + version = "v1.0.5" [[projects]] - digest = "1:de37e343c64582d7026bf8ab6ac5b22a72eac54f3a57020db31524affed9f423" + digest = 
"1:b3027d4cb73b6576e8818c1012389dbcd8385e34bb8e4ee024891214c4338725" name = "github.com/spf13/viper" packages = ["."] pruneopts = "UT" - revision = "6d33b5a963d922d182c91e8a1c88d81fd150cfd4" - version = "v1.3.1" + revision = "3826be313591f83193f048520482a7b3cf17d506" + version = "v1.7.1" + +[[projects]] + digest = "1:f4b32291cad5efac2bfdba89ccde6aa04618b62ce06c1a571da2dc4f3f2677fb" + name = "github.com/subosito/gotenv" + packages = ["."] + pruneopts = "UT" + revision = "2ef7124db659d49edac6aa459693a15ae36c671a" + version = "v1.2.0" [[projects]] branch = "master" - digest = "1:10995dafbbb8715bfdd3df05e8eefa5d4284441f8e70a54e88416a60c85c751e" + digest = "1:4a02b656f2fdded85986e2bf64dcdc2277a8124626d12ef49f9d87fd600d78d4" name = "golang.org/x/crypto" packages = [ "bcrypt", "blowfish", + "chacha20", "curve25519", "ed25519", "ed25519/internal/edwards25519", - "internal/chacha20", "internal/subtle", "md4", "poly1305", "ssh", + "ssh/internal/bcrypt_pbkdf", ] pruneopts = "UT" - revision = "a4c6cb3142f211c99e4bf4cd769535b29a9b616f" + revision = "5c72a883971a4325f8c62bf07b6d38c20ea47a6a" [[projects]] branch = "master" - digest = "1:03a8ff54c1a62710fe21a08ad9ccd554ac90232c4fd34c2731f65dd47e1f2077" + digest = "1:8c294aabd4396170f5bcbb763d558e4c8d21ded4b2a1618dfb664e58a4fffeef" name = "golang.org/x/net" packages = [ "html", @@ -211,18 +243,22 @@ "html/charset", ] pruneopts = "UT" - revision = "3a22650c66bd7f4fb6d1e8072ffd7b75c8a27898" + revision = "62affa334b73ec65ed44a326519ac12c421905e3" [[projects]] branch = "master" - digest = "1:8207c052fb873f83c61a5aa16f6add5feb9881eda2112b56f69fd3b9e7f55c3f" + digest = "1:54af96607722c8391392608c659997d4c51a7844c697689d22d42f282ddd9582" name = "golang.org/x/sys" - packages = ["unix"] + packages = [ + "cpu", + "internal/unsafeheader", + "unix", + ] pruneopts = "UT" - revision = "153ac476189d567564fda94622d7a8c0fb338f6b" + revision = "288bc346aa3906399979ee2fc63bacf4c43e04c9" [[projects]] - digest = "1:4392fcf42d5cf0e3ff78c96b2acf8223d49e4fdc53eb77c99d2f8dfe4680e006" + digest = "1:517b82807b3218687e9eb32fe11de9cb07a38d06554d04cd1965c0441360c0b3" name = "golang.org/x/text" packages = [ "encoding", @@ -236,6 +272,8 @@ "encoding/traditionalchinese", "encoding/unicode", "internal/gen", + "internal/language", + "internal/language/compact", "internal/tag", "internal/triegen", "internal/ucd", @@ -247,17 +285,27 @@ "unicode/norm", ] pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" + revision = "23ae387dee1f90d29a23c0e87ee0b46038fbed0e" + version = "v0.3.3" + +[[projects]] + branch = "master" + digest = "1:918a46e4a2fb83df33f668f5a6bd51b2996775d073fce1800d3ec01b0a5ddd2b" + name = "golang.org/x/xerrors" + packages = [ + ".", + "internal", + ] + pruneopts = "UT" + revision = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca" [[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "gopkg.in/fsnotify.v1" + digest = "1:e5b45b5c171a805052785685402c31e4a2f3872a7bc2a12fa8c36818149fa437" + name = "gopkg.in/ini.v1" packages = ["."] pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify.git" - version = "v1.4.7" + revision = "fcd0515f91612282aba5f5231a7dd71487e6dd8f" + version = "v1.61.0" [[projects]] branch = "v1" @@ -268,18 +316,20 @@ revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" [[projects]] - digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + digest = 
"1:d7f1bd887dc650737a421b872ca883059580e9f8314d601f88025df4f4802dce" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "UT" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" + revision = "0b1645d91e851e735d3e23330303ce81f70adbe3" + version = "v2.3.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ "github.com/cloudfoundry-incubator/candiedyaml", + "github.com/mandelsoft/vfs/pkg/osfs", + "github.com/mandelsoft/vfs/pkg/vfs", "github.com/onsi/ginkgo", "github.com/onsi/gomega", "github.com/onsi/gomega/gbytes", diff --git a/Gopkg.toml b/Gopkg.toml index fbcfdc3..ec14658 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -29,6 +29,10 @@ # name = "github.com/cloudfoundry-incubator/candiedyaml" # revision = "a41693b7b7afb422c7ecb1028458ab27da047bbb" +[[constraint]] + name = "github.com/mandelsoft/vfs" + version = "v0.1" + [[constraint]] name = "github.com/spf13/cobra" version = "0.0.3" diff --git a/README.md b/README.md index 6dad5d8..318483e 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,8 @@ Contents: - [(( foo.bar.[1].baz ))](#-foobar1baz-) - [(( foo.[bar].baz ))](#-foobarbaz-) - [(( list.[1..3] ))](#-list13-) - - [(( "foo" ))](#-foo--1) + - [(( 1.2e4 ))](#-12e4-) + - [(( "foo" ))](#-foo-) - [(( [ 1, 2, 3 ] ))](#--1-2-3--) - [(( { "alice" = 25 } ))](#--alice--25--) - [(( ( "alice" = 25 ) alice ))](#--alice--25---alice-) @@ -174,6 +175,7 @@ Contents: - [Bringing it all together](#bringing-it-all-together) - [Useful to Know](#useful-to-know) - [Error Reporting](#error-reporting) +- [Using _spiff_ as Go Library](#using-spiff-as-go-library) # Installation @@ -513,6 +515,10 @@ to `list.[1..]`. evaluates `foo` to the list `[b,c]`. +## `(( 1.2e4 ))` + +Number literatls are supported for integers and floating point values. + ## `(( "foo" ))` String literal. All [json string encodings](https://www.json.org/) are supported @@ -1151,7 +1157,8 @@ value (not equal `~`). ## `(( 1 + 2 * foo ))` -Dynaml expressions can be used to execute arithmetic integer calculations. Supported operations are +, -, *, / and %. +Dynaml expressions can be used to execute arithmetic integer and floating-point calculations. Supported operations are `+`, `-`, `*`, and `/`. +The modulo operator (`%`) only supports integer operands. e.g.: @@ -5895,3 +5902,110 @@ is show, otherwise the line numer is omitted. ``` +# Using _spiff_ as Go Library + +_Spiff_ provides a Go package (`spiffing`) that can be used to include _spiff_ templates in Go programs. 
+ +An example program could look like this: + +```go +package main + +import ( + "fmt" + "math" + "os" + + "github.com/mandelsoft/spiff/dynaml" + "github.com/mandelsoft/spiff/spiffing" +) + +func func_pow(arguments []interface{}, binding dynaml.Binding) (interface{}, dynaml.EvaluationInfo, bool) { + info := dynaml.DefaultInfo() + + if len(arguments) != 2 { + return info.Error("pow takes 2 arguments") + } + + a, b, err := dynaml.NumberOperands(arguments[0], arguments[1]) + + if err != nil { + return info.Error("%s", err) + } + _, i := a.(int64) + if i { + r := math.Pow(float64(a.(int64)), float64(b.(int64))) + if float64(int64(r)) == r { + return int64(r), info, true + } + return r, info, true + } else { + return math.Pow(a.(float64), b.(float64)), info, true + } +} + +var state = ` +state: {} +` +var stub = ` +unused: (( input )) +ages: + alice: (( pow(2,5) )) + bob: (( alice + 1 )) +` + +var template = ` +state: + <<<: (( &state )) + random: (( rand("[:alnum:]", 10) )) +ages: (( &temporary )) + +example: + name: (( input )) # direct reference to additional values + sum: (( sum[ages|0|s,k,v|->s + v] )) + int: (( pow(2,4) )) + float: 2.1 + pow: (( pow(1.1e1,2.1) )) +` + +func Error(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + } +} + +func main() { + values := map[string]interface{}{} + values["input"] = "this is an input" + + functions := spiffing.NewFunctions() + functions.RegisterFunction("pow", func_pow) + + spiff, err := spiffing.New().WithFunctions(functions).WithValues(values) + Error(err) + pstate, err := spiff.Unmarshal("state", []byte(state)) + Error(err) + pstub, err := spiff.Unmarshal("stub", []byte(stub)) + Error(err) + ptempl, err := spiff.Unmarshal("template", []byte(template)) + Error(err) + result, err := spiff.Cascade(ptempl, []spiffing.Node{pstub}, pstate) + Error(err) + b, err := spiff.Marshal(result) + Error(err) + newstate, err := spiff.Marshal(spiff.DetermineState(result)) + Error(err) + fmt.Printf("==== new state ===\n") + fmt.Printf("%s\n", string(newstate)) + fmt.Printf("==== result ===\n") + fmt.Printf("%s\n", string(b)) +} +``` + +It supports + - transforming file data to and from spiff's internal node representation + - the processing of stubs and templates with or without state handling + - defining an outer binding for injected path names + - defining additional spiff functions + - enabling/disabling command execution and/or filesystem operations + - using a [virtual filesystem](http://github.com/mandelsoft/vfs) for + file system operations diff --git a/cmd/merge.go b/cmd/merge.go index 3708388..aac09bf 100644 --- a/cmd/merge.go +++ b/cmd/merge.go @@ -51,7 +51,7 @@ func init() { mergeCmd.Flags().StringVar(&outputPath, "path", "", "output is taken from given path") mergeCmd.Flags().BoolVar(&split, "split", false, "if the output is alist it will be split into separate documents") mergeCmd.Flags().BoolVar(&processingOptions.PreserveEscapes, "preserve-escapes", false, "preserve escaping for escaped expressions and merges") - mergeCmd.Flags().BoolVar(&processingOptions.PreserveTemporaray, "preserve-temporary", false, "preserve temporary fields") + mergeCmd.Flags().BoolVar(&processingOptions.PreserveTemporary, "preserve-temporary", false, "preserve temporary fields") mergeCmd.Flags().StringVar(&state, "state", "", "select state file to maintain") mergeCmd.Flags().StringArrayVar(&selection, "select", []string{}, "filter dedicated output fields") } diff --git a/cmd/root.go b/cmd/root.go index ff80303..91b663f 100644 --- a/cmd/root.go +++ 
b/cmd/root.go @@ -17,7 +17,7 @@ var cfgFile string var rootCmd = &cobra.Command{ Use: "spiff", Short: "YAML in-domain templating processor", - Version: "v1.5.0", + Version: "v1.6.0-beta-1", } // Execute adds all child commands to the root command and sets flags appropriately. diff --git a/cmd/run.go b/cmd/run.go index 74c6887..5e6a560 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -40,8 +40,7 @@ func init() { processCmd.Flags().StringVar(&state, "state", "", "select state file to maintain") processCmd.Flags().StringArrayVar(&selection, "select", []string{}, "filter dedicated output fields") processCmd.Flags().BoolVar(&processingOptions.PreserveEscapes, "preserve-escapes", false, "preserve escaping for escaped expressions and merges") - processCmd.Flags().BoolVar(&processingOptions.PreserveTemporaray, "preserve-temporary", false, "preserve temporary fields") - + processCmd.Flags().BoolVar(&processingOptions.PreserveTemporary, "preserve-temporary", false, "preserve temporary fields") } func run(documentFilePath, templateFilePath string, opts flow.Options, json, split bool, diff --git a/dynaml/addition.go b/dynaml/addition.go index 69a9456..2d18af2 100644 --- a/dynaml/addition.go +++ b/dynaml/addition.go @@ -3,6 +3,7 @@ package dynaml import ( "fmt" "net" + "reflect" ) type AdditionExpr struct { @@ -18,7 +19,7 @@ func (e AdditionExpr) Evaluate(binding Binding, locally bool) (interface{}, Eval return nil, info, false } - bint, info, ok := ResolveIntegerExpressionOrPushEvaluation(&e.B, &resolved, &info, binding, false) + b, info, ok := ResolveExpressionOrPushEvaluation(&e.B, &resolved, &info, binding, false) if !ok { return nil, info, false } @@ -27,20 +28,26 @@ func (e AdditionExpr) Evaluate(binding Binding, locally bool) (interface{}, Eval return e, info, true } - aint, ok := a.(int64) - if ok { - return aint + bint, info, true - } - str, ok := a.(string) if ok { ip := net.ParseIP(str) - if ip != nil { - return IPAdd(ip, bint).String(), info, true + if ip == nil { + return info.Error("first argument for addition must be IP address or number") } - return info.Error("string argument for PLUS must be an IP address") + bint, ok := b.(int64) + if !ok { + return info.Error("addition argument for an IP address requires an integer argument") + } + return IPAdd(ip, bint).String(), info, true + } + a, b, err := NumberOperands(a, b) + if err != nil { + return info.Error("non-IP address addition requires number arguments") } - return info.Error("first argument of PLUS must be IP address or integer") + if _, ok := a.(int64); ok { + return a.(int64) + b.(int64), info, true + } + return a.(float64) + b.(float64), info, true } func (e AdditionExpr) String() string { @@ -61,3 +68,63 @@ func IPAdd(ip net.IP, offset int64) net.IP { } return ip } + +func NumberOperands(a, b interface{}) (interface{}, interface{}, error) { + ia, iaok := a.(int64) + fa, faok := a.(float64) + if !iaok && !faok { + return nil, nil, fmt.Errorf("operand must be integer or float (%s)", reflect.TypeOf(a)) + } + ib, ibok := b.(int64) + fb, fbok := b.(float64) + if !ibok && !fbok { + return nil, nil, fmt.Errorf("operand must be integer or float (%s)", reflect.TypeOf(b)) + } + if iaok == ibok { + return a, b, nil + } + if faok { + return fa, float64(ib), nil + } + return float64(ia), fb, nil +} + +const TYPE_INT = 1 +const TYPE_FLOAT = 2 +const TYPE_NUMBER = TYPE_FLOAT | TYPE_INT + +func NumberOperandsN(convert int, ops ...interface{}) ([]interface{}, bool, error) { + isInt := true + var r []interface{} + + for n, o := range ops { + v, ok := 
o.(int64) + if ok { + if isInt && (convert&TYPE_INT != 0) { + r = append(r, v) + } else { + r = append(r, float64(v)) + } + } else { + v, ok := o.(float64) + if ok { + if isInt { + isInt = false + if convert == TYPE_NUMBER { + for i, v := range r { + r[i] = float64(v.(int64)) + } + } + } + if convert&TYPE_FLOAT != 0 { + r = append(r, v) + } else { + r = append(r, int64(v)) + } + } else { + return nil, false, fmt.Errorf("operand %d must be integer or float (%s)", n, reflect.TypeOf(o)) + } + } + } + return r, isInt, nil +} diff --git a/dynaml/addition_test.go b/dynaml/addition_test.go index cfd4a91..78b2c1a 100644 --- a/dynaml/addition_test.go +++ b/dynaml/addition_test.go @@ -91,5 +91,30 @@ var _ = Describe("addition", func() { Expect(expr).To(EvaluateAs("10.9.9.255", FakeBinding{})) }) + + It("adds floats", func() { + expr := AdditionExpr{ + FloatExpr{1.2}, + FloatExpr{2.3}, + } + + Expect(expr).To(EvaluateAs(3.5, FakeBinding{})) + }) + It("adds ints and floats", func() { + expr := AdditionExpr{ + IntegerExpr{1}, + FloatExpr{2.3}, + } + + Expect(expr).To(EvaluateAs(3.3, FakeBinding{})) + }) + It("adds floats and ints", func() { + expr := AdditionExpr{ + FloatExpr{2.3}, + IntegerExpr{1}, + } + + Expect(expr).To(EvaluateAs(3.3, FakeBinding{})) + }) }) }) diff --git a/dynaml/call.go b/dynaml/call.go index a28b92d..3cd0fc1 100644 --- a/dynaml/call.go +++ b/dynaml/call.go @@ -10,10 +10,30 @@ import ( type Function func(arguments []interface{}, binding Binding) (interface{}, EvaluationInfo, bool) -var functions = map[string]Function{} +type Registry interface { + RegisterFunction(name string, f Function) + LookupFunction(name string) Function +} +type registry struct { + functions map[string]Function +} + +func NewRegistry() Registry { + return ®istry{map[string]Function{}} +} + +func (r *registry) RegisterFunction(name string, f Function) { + r.functions[name] = f +} + +func (r *registry) LookupFunction(name string) Function { + return r.functions[name] +} + +var functions = NewRegistry() func RegisterFunction(name string, f Function) { - functions[name] = f + functions.RegisterFunction(name, f) } type NameArgument struct { @@ -298,7 +318,14 @@ func (e CallExpr) Evaluate(binding Binding, locally bool) (interface{}, Evaluati } default: - f := functions[funcName] + var f Function + ext := binding.GetState().GetFunctions() + if ext != nil { + f = ext.LookupFunction(funcName) + } + if f == nil { + f = functions.LookupFunction(funcName) + } if f == nil { return info.Error("unknown function '%s'", funcName) } diff --git a/dynaml/division.go b/dynaml/division.go index d599ffd..45c4661 100644 --- a/dynaml/division.go +++ b/dynaml/division.go @@ -18,7 +18,7 @@ func (e DivisionExpr) Evaluate(binding Binding, locally bool) (interface{}, Eval return nil, info, false } - bint, info, ok := ResolveIntegerExpressionOrPushEvaluation(&e.B, &resolved, &info, binding, false) + b, info, ok := ResolveExpressionOrPushEvaluation(&e.B, &resolved, &info, binding, false) if !ok { return nil, info, false } @@ -27,24 +27,24 @@ func (e DivisionExpr) Evaluate(binding Binding, locally bool) (interface{}, Eval return e, info, true } - if bint == 0 { - return info.Error("division by zero") - } - - aint, ok := a.(int64) - if ok { - return aint / bint, info, true - } - str, ok := a.(string) if ok { ip, cidr, err := net.ParseCIDR(str) if err != nil { - return info.Error("CIDR or int argument required as first argument for division: %s", err) + return info.Error("first argument of division must be CIDR or number: %s", err) } ones, 
bits := cidr.Mask.Size() ip = ip.Mask(cidr.Mask) round := false + + bint, ok := b.(int64) + if !ok { + return info.Error("IP address division requires an integer argument") + } + if bint < 1 { + return info.Error("IP address division requires a positive integer argument") + } + for bint > 1 { if bint%2 == 1 { round = true @@ -60,7 +60,21 @@ func (e DivisionExpr) Evaluate(binding Binding, locally bool) (interface{}, Eval } return (&net.IPNet{ip, net.CIDRMask(ones, bits)}).String(), info, true } - return info.Error("CIDR or int argument required as first argument for division") + + a, b, err := NumberOperands(a, b) + if err != nil { + return info.Error("non-CIDR division requires number arguments") + } + if ib, ok := b.(int64); ok { + if ib == 0 { + return info.Error("division by zero") + } + return a.(int64) / ib, info, true + } + if b.(float64) == 0.0 { + return info.Error("division by zero") + } + return a.(float64) / b.(float64), info, true } func (e DivisionExpr) String() string { diff --git a/dynaml/division_test.go b/dynaml/division_test.go index 61e58fb..4f91729 100644 --- a/dynaml/division_test.go +++ b/dynaml/division_test.go @@ -71,6 +71,40 @@ var _ = Describe("division", func() { IntegerExpr{257}, } + Expect(expr).To(FailToEvaluate(FakeBinding{})) + }) + }) + Context("floats", func() { + It("divides floats", func() { + expr := DivisionExpr{ + FloatExpr{2.2}, + FloatExpr{1.1}, + } + + Expect(expr).To(EvaluateAs(2.0, FakeBinding{})) + }) + It("divides ints and floats", func() { + expr := DivisionExpr{ + IntegerExpr{3}, + FloatExpr{0.5}, + } + + Expect(expr).To(EvaluateAs(6.0, FakeBinding{})) + }) + It("divides floats and ints", func() { + expr := DivisionExpr{ + FloatExpr{2.2}, + IntegerExpr{2}, + } + + Expect(expr).To(EvaluateAs(1.1, FakeBinding{})) + }) + It("fails for zero", func() { + expr := DivisionExpr{ + FloatExpr{2.2}, + FloatExpr{0.0}, + } + Expect(expr).To(FailToEvaluate(FakeBinding{})) }) }) diff --git a/dynaml/dynaml.peg b/dynaml/dynaml.peg index 4d4857f..04dcacc 100644 --- a/dynaml/dynaml.peg +++ b/dynaml/dynaml.peg @@ -43,7 +43,7 @@ Multiplication <- '*' req_ws Level0 Division <- '/' req_ws Level0 Modulo <- '%' req_ws Level0 -Level0 <- IP / String / Integer / Boolean / Undefined / Nil / Symbol / Not / +Level0 <- IP / String / Number / Boolean / Undefined / Nil / Symbol / Not / Substitution / Merge / Auto / Lambda / Chained Chained <- ( MapMapping / Sync / Catch / Mapping / MapSelection / Selection / Sum / List / Map / Range / Grouped / Reference ) ChainedQualifiedExpression* @@ -71,7 +71,7 @@ Range <- StartRange Expression? RangeOp Expression? ']' StartRange <- '[' RangeOp <- '..' -Integer <- '-'? [0-9] [0-9_]* +Number <- '-'? [0-9] [0-9_]* ( '.' [0-9] [0-9]* )? ( ( 'e' / 'E' ) '-'? [0-9] [0-9]* )? 
String <- '"' ('\\"' / !'"' .)* '"' Boolean <- 'true' / 'false' Nil <- 'nil' / '~' diff --git a/dynaml/dynaml.peg.go b/dynaml/dynaml.peg.go index 037bb15..609a9be 100644 --- a/dynaml/dynaml.peg.go +++ b/dynaml/dynaml.peg.go @@ -66,7 +66,7 @@ const ( ruleRange ruleStartRange ruleRangeOp - ruleInteger + ruleNumber ruleString ruleBoolean ruleNil @@ -176,7 +176,7 @@ var rul3s = [...]string{ "Range", "StartRange", "RangeOp", - "Integer", + "Number", "String", "Boolean", "Nil", @@ -1831,7 +1831,7 @@ func (p *DynamlGrammar) Init() { position, tokenIndex, depth = position107, tokenIndex107, depth107 return false }, - /* 30 Level0 <- <(IP / String / Integer / Boolean / Undefined / Nil / Symbol / Not / Substitution / Merge / Auto / Lambda / Chained)> */ + /* 30 Level0 <- <(IP / String / Number / Boolean / Undefined / Nil / Symbol / Not / Substitution / Merge / Auto / Lambda / Chained)> */ func() bool { position109, tokenIndex109, depth109 := position, tokenIndex, depth { @@ -1851,7 +1851,7 @@ func (p *DynamlGrammar) Init() { goto l111 l113: position, tokenIndex, depth = position111, tokenIndex111, depth111 - if !_rules[ruleInteger]() { + if !_rules[ruleNumber]() { goto l114 } goto l111 @@ -2621,7 +2621,7 @@ func (p *DynamlGrammar) Init() { position, tokenIndex, depth = position207, tokenIndex207, depth207 return false }, - /* 52 Integer <- <('-'? [0-9] ([0-9] / '_')*)> */ + /* 52 Number <- <('-'? [0-9] ([0-9] / '_')* ('.' [0-9] [0-9]*)? (('e' / 'E') '-'? [0-9] [0-9]*)?)> */ func() bool { position209, tokenIndex209, depth209 := position, tokenIndex, depth { @@ -2664,8 +2664,82 @@ func (p *DynamlGrammar) Init() { l214: position, tokenIndex, depth = position214, tokenIndex214, depth214 } + { + position217, tokenIndex217, depth217 := position, tokenIndex, depth + if buffer[position] != rune('.') { + goto l217 + } + position++ + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l217 + } + position++ + l219: + { + position220, tokenIndex220, depth220 := position, tokenIndex, depth + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l220 + } + position++ + goto l219 + l220: + position, tokenIndex, depth = position220, tokenIndex220, depth220 + } + goto l218 + l217: + position, tokenIndex, depth = position217, tokenIndex217, depth217 + } + l218: + { + position221, tokenIndex221, depth221 := position, tokenIndex, depth + { + position223, tokenIndex223, depth223 := position, tokenIndex, depth + if buffer[position] != rune('e') { + goto l224 + } + position++ + goto l223 + l224: + position, tokenIndex, depth = position223, tokenIndex223, depth223 + if buffer[position] != rune('E') { + goto l221 + } + position++ + } + l223: + { + position225, tokenIndex225, depth225 := position, tokenIndex, depth + if buffer[position] != rune('-') { + goto l225 + } + position++ + goto l226 + l225: + position, tokenIndex, depth = position225, tokenIndex225, depth225 + } + l226: + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l221 + } + position++ + l227: + { + position228, tokenIndex228, depth228 := position, tokenIndex, depth + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l228 + } + position++ + goto l227 + l228: + position, tokenIndex, depth = position228, tokenIndex228, depth228 + } + goto l222 + l221: + position, tokenIndex, depth = position221, tokenIndex221, depth221 + } + l222: depth-- - add(ruleInteger, position210) + add(ruleNumber, position210) } return true l209: @@ -2674,2018 +2748,2018 @@ func (p *DynamlGrammar) Init() { }, /* 53 String <- <('"' 
(('\\' '"') / (!'"' .))* '"')> */ func() bool { - position217, tokenIndex217, depth217 := position, tokenIndex, depth + position229, tokenIndex229, depth229 := position, tokenIndex, depth { - position218 := position + position230 := position depth++ if buffer[position] != rune('"') { - goto l217 + goto l229 } position++ - l219: + l231: { - position220, tokenIndex220, depth220 := position, tokenIndex, depth + position232, tokenIndex232, depth232 := position, tokenIndex, depth { - position221, tokenIndex221, depth221 := position, tokenIndex, depth + position233, tokenIndex233, depth233 := position, tokenIndex, depth if buffer[position] != rune('\\') { - goto l222 + goto l234 } position++ if buffer[position] != rune('"') { - goto l222 + goto l234 } position++ - goto l221 - l222: - position, tokenIndex, depth = position221, tokenIndex221, depth221 + goto l233 + l234: + position, tokenIndex, depth = position233, tokenIndex233, depth233 { - position223, tokenIndex223, depth223 := position, tokenIndex, depth + position235, tokenIndex235, depth235 := position, tokenIndex, depth if buffer[position] != rune('"') { - goto l223 + goto l235 } position++ - goto l220 - l223: - position, tokenIndex, depth = position223, tokenIndex223, depth223 + goto l232 + l235: + position, tokenIndex, depth = position235, tokenIndex235, depth235 } if !matchDot() { - goto l220 + goto l232 } } - l221: - goto l219 - l220: - position, tokenIndex, depth = position220, tokenIndex220, depth220 + l233: + goto l231 + l232: + position, tokenIndex, depth = position232, tokenIndex232, depth232 } if buffer[position] != rune('"') { - goto l217 + goto l229 } position++ depth-- - add(ruleString, position218) + add(ruleString, position230) } return true - l217: - position, tokenIndex, depth = position217, tokenIndex217, depth217 + l229: + position, tokenIndex, depth = position229, tokenIndex229, depth229 return false }, /* 54 Boolean <- <(('t' 'r' 'u' 'e') / ('f' 'a' 'l' 's' 'e'))> */ func() bool { - position224, tokenIndex224, depth224 := position, tokenIndex, depth + position236, tokenIndex236, depth236 := position, tokenIndex, depth { - position225 := position + position237 := position depth++ { - position226, tokenIndex226, depth226 := position, tokenIndex, depth + position238, tokenIndex238, depth238 := position, tokenIndex, depth if buffer[position] != rune('t') { - goto l227 + goto l239 } position++ if buffer[position] != rune('r') { - goto l227 + goto l239 } position++ if buffer[position] != rune('u') { - goto l227 + goto l239 } position++ if buffer[position] != rune('e') { - goto l227 + goto l239 } position++ - goto l226 - l227: - position, tokenIndex, depth = position226, tokenIndex226, depth226 + goto l238 + l239: + position, tokenIndex, depth = position238, tokenIndex238, depth238 if buffer[position] != rune('f') { - goto l224 + goto l236 } position++ if buffer[position] != rune('a') { - goto l224 + goto l236 } position++ if buffer[position] != rune('l') { - goto l224 + goto l236 } position++ if buffer[position] != rune('s') { - goto l224 + goto l236 } position++ if buffer[position] != rune('e') { - goto l224 + goto l236 } position++ } - l226: + l238: depth-- - add(ruleBoolean, position225) + add(ruleBoolean, position237) } return true - l224: - position, tokenIndex, depth = position224, tokenIndex224, depth224 + l236: + position, tokenIndex, depth = position236, tokenIndex236, depth236 return false }, /* 55 Nil <- <(('n' 'i' 'l') / '~')> */ func() bool { - position228, tokenIndex228, depth228 := position, tokenIndex, depth 
+ position240, tokenIndex240, depth240 := position, tokenIndex, depth { - position229 := position + position241 := position depth++ { - position230, tokenIndex230, depth230 := position, tokenIndex, depth + position242, tokenIndex242, depth242 := position, tokenIndex, depth if buffer[position] != rune('n') { - goto l231 + goto l243 } position++ if buffer[position] != rune('i') { - goto l231 + goto l243 } position++ if buffer[position] != rune('l') { - goto l231 + goto l243 } position++ - goto l230 - l231: - position, tokenIndex, depth = position230, tokenIndex230, depth230 + goto l242 + l243: + position, tokenIndex, depth = position242, tokenIndex242, depth242 if buffer[position] != rune('~') { - goto l228 + goto l240 } position++ } - l230: + l242: depth-- - add(ruleNil, position229) + add(ruleNil, position241) } return true - l228: - position, tokenIndex, depth = position228, tokenIndex228, depth228 + l240: + position, tokenIndex, depth = position240, tokenIndex240, depth240 return false }, /* 56 Undefined <- <('~' '~')> */ func() bool { - position232, tokenIndex232, depth232 := position, tokenIndex, depth + position244, tokenIndex244, depth244 := position, tokenIndex, depth { - position233 := position + position245 := position depth++ if buffer[position] != rune('~') { - goto l232 + goto l244 } position++ if buffer[position] != rune('~') { - goto l232 + goto l244 } position++ depth-- - add(ruleUndefined, position233) + add(ruleUndefined, position245) } return true - l232: - position, tokenIndex, depth = position232, tokenIndex232, depth232 + l244: + position, tokenIndex, depth = position244, tokenIndex244, depth244 return false }, /* 57 Symbol <- <('$' Name)> */ func() bool { - position234, tokenIndex234, depth234 := position, tokenIndex, depth + position246, tokenIndex246, depth246 := position, tokenIndex, depth { - position235 := position + position247 := position depth++ if buffer[position] != rune('$') { - goto l234 + goto l246 } position++ if !_rules[ruleName]() { - goto l234 + goto l246 } depth-- - add(ruleSymbol, position235) + add(ruleSymbol, position247) } return true - l234: - position, tokenIndex, depth = position234, tokenIndex234, depth234 + l246: + position, tokenIndex, depth = position246, tokenIndex246, depth246 return false }, /* 58 List <- <(StartList ExpressionList? 
']')> */ func() bool { - position236, tokenIndex236, depth236 := position, tokenIndex, depth + position248, tokenIndex248, depth248 := position, tokenIndex, depth { - position237 := position + position249 := position depth++ if !_rules[ruleStartList]() { - goto l236 + goto l248 } { - position238, tokenIndex238, depth238 := position, tokenIndex, depth + position250, tokenIndex250, depth250 := position, tokenIndex, depth if !_rules[ruleExpressionList]() { - goto l238 + goto l250 } - goto l239 - l238: - position, tokenIndex, depth = position238, tokenIndex238, depth238 + goto l251 + l250: + position, tokenIndex, depth = position250, tokenIndex250, depth250 } - l239: + l251: if buffer[position] != rune(']') { - goto l236 + goto l248 } position++ depth-- - add(ruleList, position237) + add(ruleList, position249) } return true - l236: - position, tokenIndex, depth = position236, tokenIndex236, depth236 + l248: + position, tokenIndex, depth = position248, tokenIndex248, depth248 return false }, /* 59 StartList <- <('[' ws)> */ func() bool { - position240, tokenIndex240, depth240 := position, tokenIndex, depth + position252, tokenIndex252, depth252 := position, tokenIndex, depth { - position241 := position + position253 := position depth++ if buffer[position] != rune('[') { - goto l240 + goto l252 } position++ if !_rules[rulews]() { - goto l240 + goto l252 } depth-- - add(ruleStartList, position241) + add(ruleStartList, position253) } return true - l240: - position, tokenIndex, depth = position240, tokenIndex240, depth240 + l252: + position, tokenIndex, depth = position252, tokenIndex252, depth252 return false }, /* 60 Map <- <(CreateMap ws Assignments? '}')> */ func() bool { - position242, tokenIndex242, depth242 := position, tokenIndex, depth + position254, tokenIndex254, depth254 := position, tokenIndex, depth { - position243 := position + position255 := position depth++ if !_rules[ruleCreateMap]() { - goto l242 + goto l254 } if !_rules[rulews]() { - goto l242 + goto l254 } { - position244, tokenIndex244, depth244 := position, tokenIndex, depth + position256, tokenIndex256, depth256 := position, tokenIndex, depth if !_rules[ruleAssignments]() { - goto l244 + goto l256 } - goto l245 - l244: - position, tokenIndex, depth = position244, tokenIndex244, depth244 + goto l257 + l256: + position, tokenIndex, depth = position256, tokenIndex256, depth256 } - l245: + l257: if buffer[position] != rune('}') { - goto l242 + goto l254 } position++ depth-- - add(ruleMap, position243) + add(ruleMap, position255) } return true - l242: - position, tokenIndex, depth = position242, tokenIndex242, depth242 + l254: + position, tokenIndex, depth = position254, tokenIndex254, depth254 return false }, /* 61 CreateMap <- <'{'> */ func() bool { - position246, tokenIndex246, depth246 := position, tokenIndex, depth + position258, tokenIndex258, depth258 := position, tokenIndex, depth { - position247 := position + position259 := position depth++ if buffer[position] != rune('{') { - goto l246 + goto l258 } position++ depth-- - add(ruleCreateMap, position247) + add(ruleCreateMap, position259) } return true - l246: - position, tokenIndex, depth = position246, tokenIndex246, depth246 + l258: + position, tokenIndex, depth = position258, tokenIndex258, depth258 return false }, /* 62 Assignments <- <(Assignment (',' Assignment)*)> */ func() bool { - position248, tokenIndex248, depth248 := position, tokenIndex, depth + position260, tokenIndex260, depth260 := position, tokenIndex, depth { - position249 := position + position261 := 
position depth++ if !_rules[ruleAssignment]() { - goto l248 + goto l260 } - l250: + l262: { - position251, tokenIndex251, depth251 := position, tokenIndex, depth + position263, tokenIndex263, depth263 := position, tokenIndex, depth if buffer[position] != rune(',') { - goto l251 + goto l263 } position++ if !_rules[ruleAssignment]() { - goto l251 + goto l263 } - goto l250 - l251: - position, tokenIndex, depth = position251, tokenIndex251, depth251 + goto l262 + l263: + position, tokenIndex, depth = position263, tokenIndex263, depth263 } depth-- - add(ruleAssignments, position249) + add(ruleAssignments, position261) } return true - l248: - position, tokenIndex, depth = position248, tokenIndex248, depth248 + l260: + position, tokenIndex, depth = position260, tokenIndex260, depth260 return false }, /* 63 Assignment <- <(Expression '=' Expression)> */ func() bool { - position252, tokenIndex252, depth252 := position, tokenIndex, depth + position264, tokenIndex264, depth264 := position, tokenIndex, depth { - position253 := position + position265 := position depth++ if !_rules[ruleExpression]() { - goto l252 + goto l264 } if buffer[position] != rune('=') { - goto l252 + goto l264 } position++ if !_rules[ruleExpression]() { - goto l252 + goto l264 } depth-- - add(ruleAssignment, position253) + add(ruleAssignment, position265) } return true - l252: - position, tokenIndex, depth = position252, tokenIndex252, depth252 + l264: + position, tokenIndex, depth = position264, tokenIndex264, depth264 return false }, /* 64 Merge <- <(RefMerge / SimpleMerge)> */ func() bool { - position254, tokenIndex254, depth254 := position, tokenIndex, depth + position266, tokenIndex266, depth266 := position, tokenIndex, depth { - position255 := position + position267 := position depth++ { - position256, tokenIndex256, depth256 := position, tokenIndex, depth + position268, tokenIndex268, depth268 := position, tokenIndex, depth if !_rules[ruleRefMerge]() { - goto l257 + goto l269 } - goto l256 - l257: - position, tokenIndex, depth = position256, tokenIndex256, depth256 + goto l268 + l269: + position, tokenIndex, depth = position268, tokenIndex268, depth268 if !_rules[ruleSimpleMerge]() { - goto l254 + goto l266 } } - l256: + l268: depth-- - add(ruleMerge, position255) + add(ruleMerge, position267) } return true - l254: - position, tokenIndex, depth = position254, tokenIndex254, depth254 + l266: + position, tokenIndex, depth = position266, tokenIndex266, depth266 return false }, /* 65 RefMerge <- <('m' 'e' 'r' 'g' 'e' !(req_ws Required) (req_ws (Replace / On))? 
req_ws Reference)> */ func() bool { - position258, tokenIndex258, depth258 := position, tokenIndex, depth + position270, tokenIndex270, depth270 := position, tokenIndex, depth { - position259 := position + position271 := position depth++ if buffer[position] != rune('m') { - goto l258 + goto l270 } position++ if buffer[position] != rune('e') { - goto l258 + goto l270 } position++ if buffer[position] != rune('r') { - goto l258 + goto l270 } position++ if buffer[position] != rune('g') { - goto l258 + goto l270 } position++ if buffer[position] != rune('e') { - goto l258 + goto l270 } position++ { - position260, tokenIndex260, depth260 := position, tokenIndex, depth + position272, tokenIndex272, depth272 := position, tokenIndex, depth if !_rules[rulereq_ws]() { - goto l260 + goto l272 } if !_rules[ruleRequired]() { - goto l260 + goto l272 } - goto l258 - l260: - position, tokenIndex, depth = position260, tokenIndex260, depth260 + goto l270 + l272: + position, tokenIndex, depth = position272, tokenIndex272, depth272 } { - position261, tokenIndex261, depth261 := position, tokenIndex, depth + position273, tokenIndex273, depth273 := position, tokenIndex, depth if !_rules[rulereq_ws]() { - goto l261 + goto l273 } { - position263, tokenIndex263, depth263 := position, tokenIndex, depth + position275, tokenIndex275, depth275 := position, tokenIndex, depth if !_rules[ruleReplace]() { - goto l264 + goto l276 } - goto l263 - l264: - position, tokenIndex, depth = position263, tokenIndex263, depth263 + goto l275 + l276: + position, tokenIndex, depth = position275, tokenIndex275, depth275 if !_rules[ruleOn]() { - goto l261 + goto l273 } } - l263: - goto l262 - l261: - position, tokenIndex, depth = position261, tokenIndex261, depth261 + l275: + goto l274 + l273: + position, tokenIndex, depth = position273, tokenIndex273, depth273 } - l262: + l274: if !_rules[rulereq_ws]() { - goto l258 + goto l270 } if !_rules[ruleReference]() { - goto l258 + goto l270 } depth-- - add(ruleRefMerge, position259) + add(ruleRefMerge, position271) } return true - l258: - position, tokenIndex, depth = position258, tokenIndex258, depth258 + l270: + position, tokenIndex, depth = position270, tokenIndex270, depth270 return false }, /* 66 SimpleMerge <- <('m' 'e' 'r' 'g' 'e' !'(' (req_ws (Replace / Required / On))?)> */ func() bool { - position265, tokenIndex265, depth265 := position, tokenIndex, depth + position277, tokenIndex277, depth277 := position, tokenIndex, depth { - position266 := position + position278 := position depth++ if buffer[position] != rune('m') { - goto l265 + goto l277 } position++ if buffer[position] != rune('e') { - goto l265 + goto l277 } position++ if buffer[position] != rune('r') { - goto l265 + goto l277 } position++ if buffer[position] != rune('g') { - goto l265 + goto l277 } position++ if buffer[position] != rune('e') { - goto l265 + goto l277 } position++ { - position267, tokenIndex267, depth267 := position, tokenIndex, depth + position279, tokenIndex279, depth279 := position, tokenIndex, depth if buffer[position] != rune('(') { - goto l267 + goto l279 } position++ - goto l265 - l267: - position, tokenIndex, depth = position267, tokenIndex267, depth267 + goto l277 + l279: + position, tokenIndex, depth = position279, tokenIndex279, depth279 } { - position268, tokenIndex268, depth268 := position, tokenIndex, depth + position280, tokenIndex280, depth280 := position, tokenIndex, depth if !_rules[rulereq_ws]() { - goto l268 + goto l280 } { - position270, tokenIndex270, depth270 := position, tokenIndex, depth + 
position282, tokenIndex282, depth282 := position, tokenIndex, depth if !_rules[ruleReplace]() { - goto l271 + goto l283 } - goto l270 - l271: - position, tokenIndex, depth = position270, tokenIndex270, depth270 + goto l282 + l283: + position, tokenIndex, depth = position282, tokenIndex282, depth282 if !_rules[ruleRequired]() { - goto l272 + goto l284 } - goto l270 - l272: - position, tokenIndex, depth = position270, tokenIndex270, depth270 + goto l282 + l284: + position, tokenIndex, depth = position282, tokenIndex282, depth282 if !_rules[ruleOn]() { - goto l268 + goto l280 } } - l270: - goto l269 - l268: - position, tokenIndex, depth = position268, tokenIndex268, depth268 + l282: + goto l281 + l280: + position, tokenIndex, depth = position280, tokenIndex280, depth280 } - l269: + l281: depth-- - add(ruleSimpleMerge, position266) + add(ruleSimpleMerge, position278) } return true - l265: - position, tokenIndex, depth = position265, tokenIndex265, depth265 + l277: + position, tokenIndex, depth = position277, tokenIndex277, depth277 return false }, /* 67 Replace <- <('r' 'e' 'p' 'l' 'a' 'c' 'e')> */ func() bool { - position273, tokenIndex273, depth273 := position, tokenIndex, depth + position285, tokenIndex285, depth285 := position, tokenIndex, depth { - position274 := position + position286 := position depth++ if buffer[position] != rune('r') { - goto l273 + goto l285 } position++ if buffer[position] != rune('e') { - goto l273 + goto l285 } position++ if buffer[position] != rune('p') { - goto l273 + goto l285 } position++ if buffer[position] != rune('l') { - goto l273 + goto l285 } position++ if buffer[position] != rune('a') { - goto l273 + goto l285 } position++ if buffer[position] != rune('c') { - goto l273 + goto l285 } position++ if buffer[position] != rune('e') { - goto l273 + goto l285 } position++ depth-- - add(ruleReplace, position274) + add(ruleReplace, position286) } return true - l273: - position, tokenIndex, depth = position273, tokenIndex273, depth273 + l285: + position, tokenIndex, depth = position285, tokenIndex285, depth285 return false }, /* 68 Required <- <('r' 'e' 'q' 'u' 'i' 'r' 'e' 'd')> */ func() bool { - position275, tokenIndex275, depth275 := position, tokenIndex, depth + position287, tokenIndex287, depth287 := position, tokenIndex, depth { - position276 := position + position288 := position depth++ if buffer[position] != rune('r') { - goto l275 + goto l287 } position++ if buffer[position] != rune('e') { - goto l275 + goto l287 } position++ if buffer[position] != rune('q') { - goto l275 + goto l287 } position++ if buffer[position] != rune('u') { - goto l275 + goto l287 } position++ if buffer[position] != rune('i') { - goto l275 + goto l287 } position++ if buffer[position] != rune('r') { - goto l275 + goto l287 } position++ if buffer[position] != rune('e') { - goto l275 + goto l287 } position++ if buffer[position] != rune('d') { - goto l275 + goto l287 } position++ depth-- - add(ruleRequired, position276) + add(ruleRequired, position288) } return true - l275: - position, tokenIndex, depth = position275, tokenIndex275, depth275 + l287: + position, tokenIndex, depth = position287, tokenIndex287, depth287 return false }, /* 69 On <- <('o' 'n' req_ws Name)> */ func() bool { - position277, tokenIndex277, depth277 := position, tokenIndex, depth + position289, tokenIndex289, depth289 := position, tokenIndex, depth { - position278 := position + position290 := position depth++ if buffer[position] != rune('o') { - goto l277 + goto l289 } position++ if buffer[position] != rune('n') 
{ - goto l277 + goto l289 } position++ if !_rules[rulereq_ws]() { - goto l277 + goto l289 } if !_rules[ruleName]() { - goto l277 + goto l289 } depth-- - add(ruleOn, position278) + add(ruleOn, position290) } return true - l277: - position, tokenIndex, depth = position277, tokenIndex277, depth277 + l289: + position, tokenIndex, depth = position289, tokenIndex289, depth289 return false }, /* 70 Auto <- <('a' 'u' 't' 'o')> */ func() bool { - position279, tokenIndex279, depth279 := position, tokenIndex, depth + position291, tokenIndex291, depth291 := position, tokenIndex, depth { - position280 := position + position292 := position depth++ if buffer[position] != rune('a') { - goto l279 + goto l291 } position++ if buffer[position] != rune('u') { - goto l279 + goto l291 } position++ if buffer[position] != rune('t') { - goto l279 + goto l291 } position++ if buffer[position] != rune('o') { - goto l279 + goto l291 } position++ depth-- - add(ruleAuto, position280) + add(ruleAuto, position292) } return true - l279: - position, tokenIndex, depth = position279, tokenIndex279, depth279 + l291: + position, tokenIndex, depth = position291, tokenIndex291, depth291 return false }, /* 71 Default <- */ func() bool { - position281, tokenIndex281, depth281 := position, tokenIndex, depth + position293, tokenIndex293, depth293 := position, tokenIndex, depth { - position282 := position + position294 := position depth++ if !_rules[ruleAction1]() { - goto l281 + goto l293 } depth-- - add(ruleDefault, position282) + add(ruleDefault, position294) } return true - l281: - position, tokenIndex, depth = position281, tokenIndex281, depth281 + l293: + position, tokenIndex, depth = position293, tokenIndex293, depth293 return false }, /* 72 Sync <- <('s' 'y' 'n' 'c' '[' Level7 ((((LambdaExpr LambdaExt) / (LambdaOrExpr LambdaOrExpr)) (('|' Expression) / Default)) / (LambdaOrExpr Default Default)) ']')> */ func() bool { - position283, tokenIndex283, depth283 := position, tokenIndex, depth + position295, tokenIndex295, depth295 := position, tokenIndex, depth { - position284 := position + position296 := position depth++ if buffer[position] != rune('s') { - goto l283 + goto l295 } position++ if buffer[position] != rune('y') { - goto l283 + goto l295 } position++ if buffer[position] != rune('n') { - goto l283 + goto l295 } position++ if buffer[position] != rune('c') { - goto l283 + goto l295 } position++ if buffer[position] != rune('[') { - goto l283 + goto l295 } position++ if !_rules[ruleLevel7]() { - goto l283 + goto l295 } { - position285, tokenIndex285, depth285 := position, tokenIndex, depth + position297, tokenIndex297, depth297 := position, tokenIndex, depth { - position287, tokenIndex287, depth287 := position, tokenIndex, depth + position299, tokenIndex299, depth299 := position, tokenIndex, depth if !_rules[ruleLambdaExpr]() { - goto l288 + goto l300 } if !_rules[ruleLambdaExt]() { - goto l288 + goto l300 } - goto l287 - l288: - position, tokenIndex, depth = position287, tokenIndex287, depth287 + goto l299 + l300: + position, tokenIndex, depth = position299, tokenIndex299, depth299 if !_rules[ruleLambdaOrExpr]() { - goto l286 + goto l298 } if !_rules[ruleLambdaOrExpr]() { - goto l286 + goto l298 } } - l287: + l299: { - position289, tokenIndex289, depth289 := position, tokenIndex, depth + position301, tokenIndex301, depth301 := position, tokenIndex, depth if buffer[position] != rune('|') { - goto l290 + goto l302 } position++ if !_rules[ruleExpression]() { - goto l290 + goto l302 } - goto l289 - l290: - position, tokenIndex, 
depth = position289, tokenIndex289, depth289 + goto l301 + l302: + position, tokenIndex, depth = position301, tokenIndex301, depth301 if !_rules[ruleDefault]() { - goto l286 + goto l298 } } - l289: - goto l285 - l286: - position, tokenIndex, depth = position285, tokenIndex285, depth285 + l301: + goto l297 + l298: + position, tokenIndex, depth = position297, tokenIndex297, depth297 if !_rules[ruleLambdaOrExpr]() { - goto l283 + goto l295 } if !_rules[ruleDefault]() { - goto l283 + goto l295 } if !_rules[ruleDefault]() { - goto l283 + goto l295 } } - l285: + l297: if buffer[position] != rune(']') { - goto l283 + goto l295 } position++ depth-- - add(ruleSync, position284) + add(ruleSync, position296) } return true - l283: - position, tokenIndex, depth = position283, tokenIndex283, depth283 + l295: + position, tokenIndex, depth = position295, tokenIndex295, depth295 return false }, /* 73 LambdaExt <- <(',' Expression)> */ func() bool { - position291, tokenIndex291, depth291 := position, tokenIndex, depth + position303, tokenIndex303, depth303 := position, tokenIndex, depth { - position292 := position + position304 := position depth++ if buffer[position] != rune(',') { - goto l291 + goto l303 } position++ if !_rules[ruleExpression]() { - goto l291 + goto l303 } depth-- - add(ruleLambdaExt, position292) + add(ruleLambdaExt, position304) } return true - l291: - position, tokenIndex, depth = position291, tokenIndex291, depth291 + l303: + position, tokenIndex, depth = position303, tokenIndex303, depth303 return false }, /* 74 LambdaOrExpr <- <(LambdaExpr / ('|' Expression))> */ func() bool { - position293, tokenIndex293, depth293 := position, tokenIndex, depth + position305, tokenIndex305, depth305 := position, tokenIndex, depth { - position294 := position + position306 := position depth++ { - position295, tokenIndex295, depth295 := position, tokenIndex, depth + position307, tokenIndex307, depth307 := position, tokenIndex, depth if !_rules[ruleLambdaExpr]() { - goto l296 + goto l308 } - goto l295 - l296: - position, tokenIndex, depth = position295, tokenIndex295, depth295 + goto l307 + l308: + position, tokenIndex, depth = position307, tokenIndex307, depth307 if buffer[position] != rune('|') { - goto l293 + goto l305 } position++ if !_rules[ruleExpression]() { - goto l293 + goto l305 } } - l295: + l307: depth-- - add(ruleLambdaOrExpr, position294) + add(ruleLambdaOrExpr, position306) } return true - l293: - position, tokenIndex, depth = position293, tokenIndex293, depth293 + l305: + position, tokenIndex, depth = position305, tokenIndex305, depth305 return false }, /* 75 Catch <- <('c' 'a' 't' 'c' 'h' '[' Level7 LambdaOrExpr ']')> */ func() bool { - position297, tokenIndex297, depth297 := position, tokenIndex, depth + position309, tokenIndex309, depth309 := position, tokenIndex, depth { - position298 := position + position310 := position depth++ if buffer[position] != rune('c') { - goto l297 + goto l309 } position++ if buffer[position] != rune('a') { - goto l297 + goto l309 } position++ if buffer[position] != rune('t') { - goto l297 + goto l309 } position++ if buffer[position] != rune('c') { - goto l297 + goto l309 } position++ if buffer[position] != rune('h') { - goto l297 + goto l309 } position++ if buffer[position] != rune('[') { - goto l297 + goto l309 } position++ if !_rules[ruleLevel7]() { - goto l297 + goto l309 } if !_rules[ruleLambdaOrExpr]() { - goto l297 + goto l309 } if buffer[position] != rune(']') { - goto l297 + goto l309 } position++ depth-- - add(ruleCatch, position298) + 
add(ruleCatch, position310) } return true - l297: - position, tokenIndex, depth = position297, tokenIndex297, depth297 + l309: + position, tokenIndex, depth = position309, tokenIndex309, depth309 return false }, /* 76 MapMapping <- <('m' 'a' 'p' '{' Level7 LambdaOrExpr '}')> */ func() bool { - position299, tokenIndex299, depth299 := position, tokenIndex, depth + position311, tokenIndex311, depth311 := position, tokenIndex, depth { - position300 := position + position312 := position depth++ if buffer[position] != rune('m') { - goto l299 + goto l311 } position++ if buffer[position] != rune('a') { - goto l299 + goto l311 } position++ if buffer[position] != rune('p') { - goto l299 + goto l311 } position++ if buffer[position] != rune('{') { - goto l299 + goto l311 } position++ if !_rules[ruleLevel7]() { - goto l299 + goto l311 } if !_rules[ruleLambdaOrExpr]() { - goto l299 + goto l311 } if buffer[position] != rune('}') { - goto l299 + goto l311 } position++ depth-- - add(ruleMapMapping, position300) + add(ruleMapMapping, position312) } return true - l299: - position, tokenIndex, depth = position299, tokenIndex299, depth299 + l311: + position, tokenIndex, depth = position311, tokenIndex311, depth311 return false }, /* 77 Mapping <- <('m' 'a' 'p' '[' Level7 LambdaOrExpr ']')> */ func() bool { - position301, tokenIndex301, depth301 := position, tokenIndex, depth + position313, tokenIndex313, depth313 := position, tokenIndex, depth { - position302 := position + position314 := position depth++ if buffer[position] != rune('m') { - goto l301 + goto l313 } position++ if buffer[position] != rune('a') { - goto l301 + goto l313 } position++ if buffer[position] != rune('p') { - goto l301 + goto l313 } position++ if buffer[position] != rune('[') { - goto l301 + goto l313 } position++ if !_rules[ruleLevel7]() { - goto l301 + goto l313 } if !_rules[ruleLambdaOrExpr]() { - goto l301 + goto l313 } if buffer[position] != rune(']') { - goto l301 + goto l313 } position++ depth-- - add(ruleMapping, position302) + add(ruleMapping, position314) } return true - l301: - position, tokenIndex, depth = position301, tokenIndex301, depth301 + l313: + position, tokenIndex, depth = position313, tokenIndex313, depth313 return false }, /* 78 MapSelection <- <('s' 'e' 'l' 'e' 'c' 't' '{' Level7 LambdaOrExpr '}')> */ func() bool { - position303, tokenIndex303, depth303 := position, tokenIndex, depth + position315, tokenIndex315, depth315 := position, tokenIndex, depth { - position304 := position + position316 := position depth++ if buffer[position] != rune('s') { - goto l303 + goto l315 } position++ if buffer[position] != rune('e') { - goto l303 + goto l315 } position++ if buffer[position] != rune('l') { - goto l303 + goto l315 } position++ if buffer[position] != rune('e') { - goto l303 + goto l315 } position++ if buffer[position] != rune('c') { - goto l303 + goto l315 } position++ if buffer[position] != rune('t') { - goto l303 + goto l315 } position++ if buffer[position] != rune('{') { - goto l303 + goto l315 } position++ if !_rules[ruleLevel7]() { - goto l303 + goto l315 } if !_rules[ruleLambdaOrExpr]() { - goto l303 + goto l315 } if buffer[position] != rune('}') { - goto l303 + goto l315 } position++ depth-- - add(ruleMapSelection, position304) + add(ruleMapSelection, position316) } return true - l303: - position, tokenIndex, depth = position303, tokenIndex303, depth303 + l315: + position, tokenIndex, depth = position315, tokenIndex315, depth315 return false }, /* 79 Selection <- <('s' 'e' 'l' 'e' 'c' 't' '[' Level7 
LambdaOrExpr ']')> */ func() bool { - position305, tokenIndex305, depth305 := position, tokenIndex, depth + position317, tokenIndex317, depth317 := position, tokenIndex, depth { - position306 := position + position318 := position depth++ if buffer[position] != rune('s') { - goto l305 + goto l317 } position++ if buffer[position] != rune('e') { - goto l305 + goto l317 } position++ if buffer[position] != rune('l') { - goto l305 + goto l317 } position++ if buffer[position] != rune('e') { - goto l305 + goto l317 } position++ if buffer[position] != rune('c') { - goto l305 + goto l317 } position++ if buffer[position] != rune('t') { - goto l305 + goto l317 } position++ if buffer[position] != rune('[') { - goto l305 + goto l317 } position++ if !_rules[ruleLevel7]() { - goto l305 + goto l317 } if !_rules[ruleLambdaOrExpr]() { - goto l305 + goto l317 } if buffer[position] != rune(']') { - goto l305 + goto l317 } position++ depth-- - add(ruleSelection, position306) + add(ruleSelection, position318) } return true - l305: - position, tokenIndex, depth = position305, tokenIndex305, depth305 + l317: + position, tokenIndex, depth = position317, tokenIndex317, depth317 return false }, /* 80 Sum <- <('s' 'u' 'm' '[' Level7 '|' Level7 LambdaOrExpr ']')> */ func() bool { - position307, tokenIndex307, depth307 := position, tokenIndex, depth + position319, tokenIndex319, depth319 := position, tokenIndex, depth { - position308 := position + position320 := position depth++ if buffer[position] != rune('s') { - goto l307 + goto l319 } position++ if buffer[position] != rune('u') { - goto l307 + goto l319 } position++ if buffer[position] != rune('m') { - goto l307 + goto l319 } position++ if buffer[position] != rune('[') { - goto l307 + goto l319 } position++ if !_rules[ruleLevel7]() { - goto l307 + goto l319 } if buffer[position] != rune('|') { - goto l307 + goto l319 } position++ if !_rules[ruleLevel7]() { - goto l307 + goto l319 } if !_rules[ruleLambdaOrExpr]() { - goto l307 + goto l319 } if buffer[position] != rune(']') { - goto l307 + goto l319 } position++ depth-- - add(ruleSum, position308) + add(ruleSum, position320) } return true - l307: - position, tokenIndex, depth = position307, tokenIndex307, depth307 + l319: + position, tokenIndex, depth = position319, tokenIndex319, depth319 return false }, /* 81 Lambda <- <('l' 'a' 'm' 'b' 'd' 'a' (LambdaRef / LambdaExpr))> */ func() bool { - position309, tokenIndex309, depth309 := position, tokenIndex, depth + position321, tokenIndex321, depth321 := position, tokenIndex, depth { - position310 := position + position322 := position depth++ if buffer[position] != rune('l') { - goto l309 + goto l321 } position++ if buffer[position] != rune('a') { - goto l309 + goto l321 } position++ if buffer[position] != rune('m') { - goto l309 + goto l321 } position++ if buffer[position] != rune('b') { - goto l309 + goto l321 } position++ if buffer[position] != rune('d') { - goto l309 + goto l321 } position++ if buffer[position] != rune('a') { - goto l309 + goto l321 } position++ { - position311, tokenIndex311, depth311 := position, tokenIndex, depth + position323, tokenIndex323, depth323 := position, tokenIndex, depth if !_rules[ruleLambdaRef]() { - goto l312 + goto l324 } - goto l311 - l312: - position, tokenIndex, depth = position311, tokenIndex311, depth311 + goto l323 + l324: + position, tokenIndex, depth = position323, tokenIndex323, depth323 if !_rules[ruleLambdaExpr]() { - goto l309 + goto l321 } } - l311: + l323: depth-- - add(ruleLambda, position310) + add(ruleLambda, 
position322) } return true - l309: - position, tokenIndex, depth = position309, tokenIndex309, depth309 + l321: + position, tokenIndex, depth = position321, tokenIndex321, depth321 return false }, /* 82 LambdaRef <- <(req_ws Expression)> */ func() bool { - position313, tokenIndex313, depth313 := position, tokenIndex, depth + position325, tokenIndex325, depth325 := position, tokenIndex, depth { - position314 := position + position326 := position depth++ if !_rules[rulereq_ws]() { - goto l313 + goto l325 } if !_rules[ruleExpression]() { - goto l313 + goto l325 } depth-- - add(ruleLambdaRef, position314) + add(ruleLambdaRef, position326) } return true - l313: - position, tokenIndex, depth = position313, tokenIndex313, depth313 + l325: + position, tokenIndex, depth = position325, tokenIndex325, depth325 return false }, /* 83 LambdaExpr <- <(ws Params ws ('-' '>') Expression)> */ func() bool { - position315, tokenIndex315, depth315 := position, tokenIndex, depth + position327, tokenIndex327, depth327 := position, tokenIndex, depth { - position316 := position + position328 := position depth++ if !_rules[rulews]() { - goto l315 + goto l327 } if !_rules[ruleParams]() { - goto l315 + goto l327 } if !_rules[rulews]() { - goto l315 + goto l327 } if buffer[position] != rune('-') { - goto l315 + goto l327 } position++ if buffer[position] != rune('>') { - goto l315 + goto l327 } position++ if !_rules[ruleExpression]() { - goto l315 + goto l327 } depth-- - add(ruleLambdaExpr, position316) + add(ruleLambdaExpr, position328) } return true - l315: - position, tokenIndex, depth = position315, tokenIndex315, depth315 + l327: + position, tokenIndex, depth = position327, tokenIndex327, depth327 return false }, /* 84 Params <- <('|' StartParams ws Names? '|')> */ func() bool { - position317, tokenIndex317, depth317 := position, tokenIndex, depth + position329, tokenIndex329, depth329 := position, tokenIndex, depth { - position318 := position + position330 := position depth++ if buffer[position] != rune('|') { - goto l317 + goto l329 } position++ if !_rules[ruleStartParams]() { - goto l317 + goto l329 } if !_rules[rulews]() { - goto l317 + goto l329 } { - position319, tokenIndex319, depth319 := position, tokenIndex, depth + position331, tokenIndex331, depth331 := position, tokenIndex, depth if !_rules[ruleNames]() { - goto l319 + goto l331 } - goto l320 - l319: - position, tokenIndex, depth = position319, tokenIndex319, depth319 + goto l332 + l331: + position, tokenIndex, depth = position331, tokenIndex331, depth331 } - l320: + l332: if buffer[position] != rune('|') { - goto l317 + goto l329 } position++ depth-- - add(ruleParams, position318) + add(ruleParams, position330) } return true - l317: - position, tokenIndex, depth = position317, tokenIndex317, depth317 + l329: + position, tokenIndex, depth = position329, tokenIndex329, depth329 return false }, /* 85 StartParams <- */ func() bool { - position321, tokenIndex321, depth321 := position, tokenIndex, depth + position333, tokenIndex333, depth333 := position, tokenIndex, depth { - position322 := position + position334 := position depth++ if !_rules[ruleAction2]() { - goto l321 + goto l333 } depth-- - add(ruleStartParams, position322) + add(ruleStartParams, position334) } return true - l321: - position, tokenIndex, depth = position321, tokenIndex321, depth321 + l333: + position, tokenIndex, depth = position333, tokenIndex333, depth333 return false }, /* 86 Names <- <(NextName (',' NextName)* DefaultValue? 
(',' NextName DefaultValue)* VarParams?)> */ func() bool { - position323, tokenIndex323, depth323 := position, tokenIndex, depth + position335, tokenIndex335, depth335 := position, tokenIndex, depth { - position324 := position + position336 := position depth++ if !_rules[ruleNextName]() { - goto l323 + goto l335 } - l325: + l337: { - position326, tokenIndex326, depth326 := position, tokenIndex, depth + position338, tokenIndex338, depth338 := position, tokenIndex, depth if buffer[position] != rune(',') { - goto l326 + goto l338 } position++ if !_rules[ruleNextName]() { - goto l326 + goto l338 } - goto l325 - l326: - position, tokenIndex, depth = position326, tokenIndex326, depth326 + goto l337 + l338: + position, tokenIndex, depth = position338, tokenIndex338, depth338 } { - position327, tokenIndex327, depth327 := position, tokenIndex, depth + position339, tokenIndex339, depth339 := position, tokenIndex, depth if !_rules[ruleDefaultValue]() { - goto l327 + goto l339 } - goto l328 - l327: - position, tokenIndex, depth = position327, tokenIndex327, depth327 + goto l340 + l339: + position, tokenIndex, depth = position339, tokenIndex339, depth339 } - l328: - l329: + l340: + l341: { - position330, tokenIndex330, depth330 := position, tokenIndex, depth + position342, tokenIndex342, depth342 := position, tokenIndex, depth if buffer[position] != rune(',') { - goto l330 + goto l342 } position++ if !_rules[ruleNextName]() { - goto l330 + goto l342 } if !_rules[ruleDefaultValue]() { - goto l330 + goto l342 } - goto l329 - l330: - position, tokenIndex, depth = position330, tokenIndex330, depth330 + goto l341 + l342: + position, tokenIndex, depth = position342, tokenIndex342, depth342 } { - position331, tokenIndex331, depth331 := position, tokenIndex, depth + position343, tokenIndex343, depth343 := position, tokenIndex, depth if !_rules[ruleVarParams]() { - goto l331 + goto l343 } - goto l332 - l331: - position, tokenIndex, depth = position331, tokenIndex331, depth331 + goto l344 + l343: + position, tokenIndex, depth = position343, tokenIndex343, depth343 } - l332: + l344: depth-- - add(ruleNames, position324) + add(ruleNames, position336) } return true - l323: - position, tokenIndex, depth = position323, tokenIndex323, depth323 + l335: + position, tokenIndex, depth = position335, tokenIndex335, depth335 return false }, /* 87 NextName <- <(ws Name ws)> */ func() bool { - position333, tokenIndex333, depth333 := position, tokenIndex, depth + position345, tokenIndex345, depth345 := position, tokenIndex, depth { - position334 := position + position346 := position depth++ if !_rules[rulews]() { - goto l333 + goto l345 } if !_rules[ruleName]() { - goto l333 + goto l345 } if !_rules[rulews]() { - goto l333 + goto l345 } depth-- - add(ruleNextName, position334) + add(ruleNextName, position346) } return true - l333: - position, tokenIndex, depth = position333, tokenIndex333, depth333 + l345: + position, tokenIndex, depth = position345, tokenIndex345, depth345 return false }, /* 88 Name <- <([a-z] / [A-Z] / [0-9] / '_')+> */ func() bool { - position335, tokenIndex335, depth335 := position, tokenIndex, depth + position347, tokenIndex347, depth347 := position, tokenIndex, depth { - position336 := position + position348 := position depth++ { - position339, tokenIndex339, depth339 := position, tokenIndex, depth + position351, tokenIndex351, depth351 := position, tokenIndex, depth if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l340 + goto l352 } position++ - goto l339 - l340: - position, tokenIndex, 
depth = position339, tokenIndex339, depth339 + goto l351 + l352: + position, tokenIndex, depth = position351, tokenIndex351, depth351 if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l341 + goto l353 } position++ - goto l339 - l341: - position, tokenIndex, depth = position339, tokenIndex339, depth339 + goto l351 + l353: + position, tokenIndex, depth = position351, tokenIndex351, depth351 if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l342 + goto l354 } position++ - goto l339 - l342: - position, tokenIndex, depth = position339, tokenIndex339, depth339 + goto l351 + l354: + position, tokenIndex, depth = position351, tokenIndex351, depth351 if buffer[position] != rune('_') { - goto l335 + goto l347 } position++ } - l339: - l337: + l351: + l349: { - position338, tokenIndex338, depth338 := position, tokenIndex, depth + position350, tokenIndex350, depth350 := position, tokenIndex, depth { - position343, tokenIndex343, depth343 := position, tokenIndex, depth + position355, tokenIndex355, depth355 := position, tokenIndex, depth if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l344 + goto l356 } position++ - goto l343 - l344: - position, tokenIndex, depth = position343, tokenIndex343, depth343 + goto l355 + l356: + position, tokenIndex, depth = position355, tokenIndex355, depth355 if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l345 + goto l357 } position++ - goto l343 - l345: - position, tokenIndex, depth = position343, tokenIndex343, depth343 + goto l355 + l357: + position, tokenIndex, depth = position355, tokenIndex355, depth355 if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l346 + goto l358 } position++ - goto l343 - l346: - position, tokenIndex, depth = position343, tokenIndex343, depth343 + goto l355 + l358: + position, tokenIndex, depth = position355, tokenIndex355, depth355 if buffer[position] != rune('_') { - goto l338 + goto l350 } position++ } - l343: - goto l337 - l338: - position, tokenIndex, depth = position338, tokenIndex338, depth338 + l355: + goto l349 + l350: + position, tokenIndex, depth = position350, tokenIndex350, depth350 } depth-- - add(ruleName, position336) + add(ruleName, position348) } return true - l335: - position, tokenIndex, depth = position335, tokenIndex335, depth335 + l347: + position, tokenIndex, depth = position347, tokenIndex347, depth347 return false }, /* 89 DefaultValue <- <('=' Expression)> */ func() bool { - position347, tokenIndex347, depth347 := position, tokenIndex, depth + position359, tokenIndex359, depth359 := position, tokenIndex, depth { - position348 := position + position360 := position depth++ if buffer[position] != rune('=') { - goto l347 + goto l359 } position++ if !_rules[ruleExpression]() { - goto l347 + goto l359 } depth-- - add(ruleDefaultValue, position348) + add(ruleDefaultValue, position360) } return true - l347: - position, tokenIndex, depth = position347, tokenIndex347, depth347 + l359: + position, tokenIndex, depth = position359, tokenIndex359, depth359 return false }, /* 90 VarParams <- <('.' '.' '.' 
ws)> */ func() bool { - position349, tokenIndex349, depth349 := position, tokenIndex, depth + position361, tokenIndex361, depth361 := position, tokenIndex, depth { - position350 := position + position362 := position depth++ if buffer[position] != rune('.') { - goto l349 + goto l361 } position++ if buffer[position] != rune('.') { - goto l349 + goto l361 } position++ if buffer[position] != rune('.') { - goto l349 + goto l361 } position++ if !_rules[rulews]() { - goto l349 + goto l361 } depth-- - add(ruleVarParams, position350) + add(ruleVarParams, position362) } return true - l349: - position, tokenIndex, depth = position349, tokenIndex349, depth349 + l361: + position, tokenIndex, depth = position361, tokenIndex361, depth361 return false }, /* 91 Reference <- <('.'? Key FollowUpRef)> */ func() bool { - position351, tokenIndex351, depth351 := position, tokenIndex, depth + position363, tokenIndex363, depth363 := position, tokenIndex, depth { - position352 := position + position364 := position depth++ { - position353, tokenIndex353, depth353 := position, tokenIndex, depth + position365, tokenIndex365, depth365 := position, tokenIndex, depth if buffer[position] != rune('.') { - goto l353 + goto l365 } position++ - goto l354 - l353: - position, tokenIndex, depth = position353, tokenIndex353, depth353 + goto l366 + l365: + position, tokenIndex, depth = position365, tokenIndex365, depth365 } - l354: + l366: if !_rules[ruleKey]() { - goto l351 + goto l363 } if !_rules[ruleFollowUpRef]() { - goto l351 + goto l363 } depth-- - add(ruleReference, position352) + add(ruleReference, position364) } return true - l351: - position, tokenIndex, depth = position351, tokenIndex351, depth351 + l363: + position, tokenIndex, depth = position363, tokenIndex363, depth363 return false }, /* 92 FollowUpRef <- */ func() bool { { - position356 := position + position368 := position depth++ - l357: + l369: { - position358, tokenIndex358, depth358 := position, tokenIndex, depth + position370, tokenIndex370, depth370 := position, tokenIndex, depth if !_rules[rulePathComponent]() { - goto l358 + goto l370 } - goto l357 - l358: - position, tokenIndex, depth = position358, tokenIndex358, depth358 + goto l369 + l370: + position, tokenIndex, depth = position370, tokenIndex370, depth370 } depth-- - add(ruleFollowUpRef, position356) + add(ruleFollowUpRef, position368) } return true }, /* 93 PathComponent <- <(('.' Key) / ('.'? 
Index))> */ func() bool { - position359, tokenIndex359, depth359 := position, tokenIndex, depth + position371, tokenIndex371, depth371 := position, tokenIndex, depth { - position360 := position + position372 := position depth++ { - position361, tokenIndex361, depth361 := position, tokenIndex, depth + position373, tokenIndex373, depth373 := position, tokenIndex, depth if buffer[position] != rune('.') { - goto l362 + goto l374 } position++ if !_rules[ruleKey]() { - goto l362 + goto l374 } - goto l361 - l362: - position, tokenIndex, depth = position361, tokenIndex361, depth361 + goto l373 + l374: + position, tokenIndex, depth = position373, tokenIndex373, depth373 { - position363, tokenIndex363, depth363 := position, tokenIndex, depth + position375, tokenIndex375, depth375 := position, tokenIndex, depth if buffer[position] != rune('.') { - goto l363 + goto l375 } position++ - goto l364 - l363: - position, tokenIndex, depth = position363, tokenIndex363, depth363 + goto l376 + l375: + position, tokenIndex, depth = position375, tokenIndex375, depth375 } - l364: + l376: if !_rules[ruleIndex]() { - goto l359 + goto l371 } } - l361: + l373: depth-- - add(rulePathComponent, position360) + add(rulePathComponent, position372) } return true - l359: - position, tokenIndex, depth = position359, tokenIndex359, depth359 + l371: + position, tokenIndex, depth = position371, tokenIndex371, depth371 return false }, /* 94 Key <- <(([a-z] / [A-Z] / [0-9] / '_') ([a-z] / [A-Z] / [0-9] / '_' / '-')* (':' ([a-z] / [A-Z] / [0-9] / '_') ([a-z] / [A-Z] / [0-9] / '_' / '-')*)?)> */ func() bool { - position365, tokenIndex365, depth365 := position, tokenIndex, depth + position377, tokenIndex377, depth377 := position, tokenIndex, depth { - position366 := position + position378 := position depth++ { - position367, tokenIndex367, depth367 := position, tokenIndex, depth + position379, tokenIndex379, depth379 := position, tokenIndex, depth if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l368 + goto l380 } position++ - goto l367 - l368: - position, tokenIndex, depth = position367, tokenIndex367, depth367 + goto l379 + l380: + position, tokenIndex, depth = position379, tokenIndex379, depth379 if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l369 + goto l381 } position++ - goto l367 - l369: - position, tokenIndex, depth = position367, tokenIndex367, depth367 + goto l379 + l381: + position, tokenIndex, depth = position379, tokenIndex379, depth379 if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l370 + goto l382 } position++ - goto l367 - l370: - position, tokenIndex, depth = position367, tokenIndex367, depth367 + goto l379 + l382: + position, tokenIndex, depth = position379, tokenIndex379, depth379 if buffer[position] != rune('_') { - goto l365 + goto l377 } position++ } - l367: - l371: + l379: + l383: { - position372, tokenIndex372, depth372 := position, tokenIndex, depth + position384, tokenIndex384, depth384 := position, tokenIndex, depth { - position373, tokenIndex373, depth373 := position, tokenIndex, depth + position385, tokenIndex385, depth385 := position, tokenIndex, depth if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l374 + goto l386 } position++ - goto l373 - l374: - position, tokenIndex, depth = position373, tokenIndex373, depth373 + goto l385 + l386: + position, tokenIndex, depth = position385, tokenIndex385, depth385 if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l375 + goto l387 } position++ - goto l373 - l375: - position, 
tokenIndex, depth = position373, tokenIndex373, depth373 + goto l385 + l387: + position, tokenIndex, depth = position385, tokenIndex385, depth385 if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l376 + goto l388 } position++ - goto l373 - l376: - position, tokenIndex, depth = position373, tokenIndex373, depth373 + goto l385 + l388: + position, tokenIndex, depth = position385, tokenIndex385, depth385 if buffer[position] != rune('_') { - goto l377 + goto l389 } position++ - goto l373 - l377: - position, tokenIndex, depth = position373, tokenIndex373, depth373 + goto l385 + l389: + position, tokenIndex, depth = position385, tokenIndex385, depth385 if buffer[position] != rune('-') { - goto l372 + goto l384 } position++ } - l373: - goto l371 - l372: - position, tokenIndex, depth = position372, tokenIndex372, depth372 + l385: + goto l383 + l384: + position, tokenIndex, depth = position384, tokenIndex384, depth384 } { - position378, tokenIndex378, depth378 := position, tokenIndex, depth + position390, tokenIndex390, depth390 := position, tokenIndex, depth if buffer[position] != rune(':') { - goto l378 + goto l390 } position++ { - position380, tokenIndex380, depth380 := position, tokenIndex, depth + position392, tokenIndex392, depth392 := position, tokenIndex, depth if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l381 + goto l393 } position++ - goto l380 - l381: - position, tokenIndex, depth = position380, tokenIndex380, depth380 + goto l392 + l393: + position, tokenIndex, depth = position392, tokenIndex392, depth392 if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l382 + goto l394 } position++ - goto l380 - l382: - position, tokenIndex, depth = position380, tokenIndex380, depth380 + goto l392 + l394: + position, tokenIndex, depth = position392, tokenIndex392, depth392 if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l383 + goto l395 } position++ - goto l380 - l383: - position, tokenIndex, depth = position380, tokenIndex380, depth380 + goto l392 + l395: + position, tokenIndex, depth = position392, tokenIndex392, depth392 if buffer[position] != rune('_') { - goto l378 + goto l390 } position++ } - l380: - l384: + l392: + l396: { - position385, tokenIndex385, depth385 := position, tokenIndex, depth + position397, tokenIndex397, depth397 := position, tokenIndex, depth { - position386, tokenIndex386, depth386 := position, tokenIndex, depth + position398, tokenIndex398, depth398 := position, tokenIndex, depth if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l387 + goto l399 } position++ - goto l386 - l387: - position, tokenIndex, depth = position386, tokenIndex386, depth386 + goto l398 + l399: + position, tokenIndex, depth = position398, tokenIndex398, depth398 if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l388 + goto l400 } position++ - goto l386 - l388: - position, tokenIndex, depth = position386, tokenIndex386, depth386 + goto l398 + l400: + position, tokenIndex, depth = position398, tokenIndex398, depth398 if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l389 + goto l401 } position++ - goto l386 - l389: - position, tokenIndex, depth = position386, tokenIndex386, depth386 + goto l398 + l401: + position, tokenIndex, depth = position398, tokenIndex398, depth398 if buffer[position] != rune('_') { - goto l390 + goto l402 } position++ - goto l386 - l390: - position, tokenIndex, depth = position386, tokenIndex386, depth386 + goto l398 + l402: + position, tokenIndex, depth = 
position398, tokenIndex398, depth398 if buffer[position] != rune('-') { - goto l385 + goto l397 } position++ } - l386: - goto l384 - l385: - position, tokenIndex, depth = position385, tokenIndex385, depth385 + l398: + goto l396 + l397: + position, tokenIndex, depth = position397, tokenIndex397, depth397 } - goto l379 - l378: - position, tokenIndex, depth = position378, tokenIndex378, depth378 + goto l391 + l390: + position, tokenIndex, depth = position390, tokenIndex390, depth390 } - l379: + l391: depth-- - add(ruleKey, position366) + add(ruleKey, position378) } return true - l365: - position, tokenIndex, depth = position365, tokenIndex365, depth365 + l377: + position, tokenIndex, depth = position377, tokenIndex377, depth377 return false }, /* 95 Index <- <('[' '-'? [0-9]+ ']')> */ func() bool { - position391, tokenIndex391, depth391 := position, tokenIndex, depth + position403, tokenIndex403, depth403 := position, tokenIndex, depth { - position392 := position + position404 := position depth++ if buffer[position] != rune('[') { - goto l391 + goto l403 } position++ { - position393, tokenIndex393, depth393 := position, tokenIndex, depth + position405, tokenIndex405, depth405 := position, tokenIndex, depth if buffer[position] != rune('-') { - goto l393 + goto l405 } position++ - goto l394 - l393: - position, tokenIndex, depth = position393, tokenIndex393, depth393 + goto l406 + l405: + position, tokenIndex, depth = position405, tokenIndex405, depth405 } - l394: + l406: if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l391 + goto l403 } position++ - l395: + l407: { - position396, tokenIndex396, depth396 := position, tokenIndex, depth + position408, tokenIndex408, depth408 := position, tokenIndex, depth if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l396 + goto l408 } position++ - goto l395 - l396: - position, tokenIndex, depth = position396, tokenIndex396, depth396 + goto l407 + l408: + position, tokenIndex, depth = position408, tokenIndex408, depth408 } if buffer[position] != rune(']') { - goto l391 + goto l403 } position++ depth-- - add(ruleIndex, position392) + add(ruleIndex, position404) } return true - l391: - position, tokenIndex, depth = position391, tokenIndex391, depth391 + l403: + position, tokenIndex, depth = position403, tokenIndex403, depth403 return false }, /* 96 IP <- <([0-9]+ '.' [0-9]+ '.' [0-9]+ '.' 
[0-9]+)> */ func() bool { - position397, tokenIndex397, depth397 := position, tokenIndex, depth + position409, tokenIndex409, depth409 := position, tokenIndex, depth { - position398 := position + position410 := position depth++ if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l397 + goto l409 } position++ - l399: + l411: { - position400, tokenIndex400, depth400 := position, tokenIndex, depth + position412, tokenIndex412, depth412 := position, tokenIndex, depth if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l400 + goto l412 } position++ - goto l399 - l400: - position, tokenIndex, depth = position400, tokenIndex400, depth400 + goto l411 + l412: + position, tokenIndex, depth = position412, tokenIndex412, depth412 } if buffer[position] != rune('.') { - goto l397 + goto l409 } position++ if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l397 + goto l409 } position++ - l401: + l413: { - position402, tokenIndex402, depth402 := position, tokenIndex, depth + position414, tokenIndex414, depth414 := position, tokenIndex, depth if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l402 + goto l414 } position++ - goto l401 - l402: - position, tokenIndex, depth = position402, tokenIndex402, depth402 + goto l413 + l414: + position, tokenIndex, depth = position414, tokenIndex414, depth414 } if buffer[position] != rune('.') { - goto l397 + goto l409 } position++ if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l397 + goto l409 } position++ - l403: + l415: { - position404, tokenIndex404, depth404 := position, tokenIndex, depth + position416, tokenIndex416, depth416 := position, tokenIndex, depth if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l404 + goto l416 } position++ - goto l403 - l404: - position, tokenIndex, depth = position404, tokenIndex404, depth404 + goto l415 + l416: + position, tokenIndex, depth = position416, tokenIndex416, depth416 } if buffer[position] != rune('.') { - goto l397 + goto l409 } position++ if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l397 + goto l409 } position++ - l405: + l417: { - position406, tokenIndex406, depth406 := position, tokenIndex, depth + position418, tokenIndex418, depth418 := position, tokenIndex, depth if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l406 + goto l418 } position++ - goto l405 - l406: - position, tokenIndex, depth = position406, tokenIndex406, depth406 + goto l417 + l418: + position, tokenIndex, depth = position418, tokenIndex418, depth418 } depth-- - add(ruleIP, position398) + add(ruleIP, position410) } return true - l397: - position, tokenIndex, depth = position397, tokenIndex397, depth397 + l409: + position, tokenIndex, depth = position409, tokenIndex409, depth409 return false }, /* 97 ws <- <(' ' / '\t' / '\n' / '\r')*> */ func() bool { { - position408 := position + position420 := position depth++ - l409: + l421: { - position410, tokenIndex410, depth410 := position, tokenIndex, depth + position422, tokenIndex422, depth422 := position, tokenIndex, depth { - position411, tokenIndex411, depth411 := position, tokenIndex, depth + position423, tokenIndex423, depth423 := position, tokenIndex, depth if buffer[position] != rune(' ') { - goto l412 + goto l424 } position++ - goto l411 - l412: - position, tokenIndex, depth = position411, tokenIndex411, depth411 + goto l423 + l424: + position, tokenIndex, depth = position423, tokenIndex423, depth423 if buffer[position] != rune('\t') { - goto l413 + goto l425 } position++ 
- goto l411 - l413: - position, tokenIndex, depth = position411, tokenIndex411, depth411 + goto l423 + l425: + position, tokenIndex, depth = position423, tokenIndex423, depth423 if buffer[position] != rune('\n') { - goto l414 + goto l426 } position++ - goto l411 - l414: - position, tokenIndex, depth = position411, tokenIndex411, depth411 + goto l423 + l426: + position, tokenIndex, depth = position423, tokenIndex423, depth423 if buffer[position] != rune('\r') { - goto l410 + goto l422 } position++ } - l411: - goto l409 - l410: - position, tokenIndex, depth = position410, tokenIndex410, depth410 + l423: + goto l421 + l422: + position, tokenIndex, depth = position422, tokenIndex422, depth422 } depth-- - add(rulews, position408) + add(rulews, position420) } return true }, /* 98 req_ws <- <(' ' / '\t' / '\n' / '\r')+> */ func() bool { - position415, tokenIndex415, depth415 := position, tokenIndex, depth + position427, tokenIndex427, depth427 := position, tokenIndex, depth { - position416 := position + position428 := position depth++ { - position419, tokenIndex419, depth419 := position, tokenIndex, depth + position431, tokenIndex431, depth431 := position, tokenIndex, depth if buffer[position] != rune(' ') { - goto l420 + goto l432 } position++ - goto l419 - l420: - position, tokenIndex, depth = position419, tokenIndex419, depth419 + goto l431 + l432: + position, tokenIndex, depth = position431, tokenIndex431, depth431 if buffer[position] != rune('\t') { - goto l421 + goto l433 } position++ - goto l419 - l421: - position, tokenIndex, depth = position419, tokenIndex419, depth419 + goto l431 + l433: + position, tokenIndex, depth = position431, tokenIndex431, depth431 if buffer[position] != rune('\n') { - goto l422 + goto l434 } position++ - goto l419 - l422: - position, tokenIndex, depth = position419, tokenIndex419, depth419 + goto l431 + l434: + position, tokenIndex, depth = position431, tokenIndex431, depth431 if buffer[position] != rune('\r') { - goto l415 + goto l427 } position++ } - l419: - l417: + l431: + l429: { - position418, tokenIndex418, depth418 := position, tokenIndex, depth + position430, tokenIndex430, depth430 := position, tokenIndex, depth { - position423, tokenIndex423, depth423 := position, tokenIndex, depth + position435, tokenIndex435, depth435 := position, tokenIndex, depth if buffer[position] != rune(' ') { - goto l424 + goto l436 } position++ - goto l423 - l424: - position, tokenIndex, depth = position423, tokenIndex423, depth423 + goto l435 + l436: + position, tokenIndex, depth = position435, tokenIndex435, depth435 if buffer[position] != rune('\t') { - goto l425 + goto l437 } position++ - goto l423 - l425: - position, tokenIndex, depth = position423, tokenIndex423, depth423 + goto l435 + l437: + position, tokenIndex, depth = position435, tokenIndex435, depth435 if buffer[position] != rune('\n') { - goto l426 + goto l438 } position++ - goto l423 - l426: - position, tokenIndex, depth = position423, tokenIndex423, depth423 + goto l435 + l438: + position, tokenIndex, depth = position435, tokenIndex435, depth435 if buffer[position] != rune('\r') { - goto l418 + goto l430 } position++ } - l423: - goto l417 - l418: - position, tokenIndex, depth = position418, tokenIndex418, depth418 + l435: + goto l429 + l430: + position, tokenIndex, depth = position430, tokenIndex430, depth430 } depth-- - add(rulereq_ws, position416) + add(rulereq_ws, position428) } return true - l415: - position, tokenIndex, depth = position415, tokenIndex415, depth415 + l427: + position, tokenIndex, depth = 
position427, tokenIndex427, depth427 return false }, /* 100 Action0 <- <{}> */ diff --git a/dynaml/expression.go b/dynaml/expression.go index 98fc6bb..6d8a074 100644 --- a/dynaml/expression.go +++ b/dynaml/expression.go @@ -1,6 +1,8 @@ package dynaml import ( + "github.com/mandelsoft/vfs/pkg/vfs" + "github.com/mandelsoft/spiff/yaml" ) @@ -19,6 +21,9 @@ type State interface { GetFileContent(file string, cached bool) ([]byte, error) GetEncryptionKey() string OSAccessAllowed() bool + FileAccessAllowed() bool + FileSystem() vfs.VFS + GetFunctions() Registry } type Binding interface { diff --git a/dynaml/files.go b/dynaml/files.go index 5ab9061..737b3cb 100644 --- a/dynaml/files.go +++ b/dynaml/files.go @@ -2,12 +2,15 @@ package dynaml import ( "github.com/mandelsoft/spiff/yaml" - "io/ioutil" ) func func_listFiles(directory bool, arguments []interface{}, binding Binding) (interface{}, EvaluationInfo, bool) { info := DefaultInfo() + if !binding.GetState().FileAccessAllowed() { + return info.DenyOSOperation("listFiles") + } + if len(arguments) != 1 { return info.Error("list requires exactly one arguments") } @@ -21,11 +24,11 @@ func func_listFiles(directory bool, arguments []interface{}, binding Binding) (i return info.Error("list: argument is empty string") } - if !checkExistence(name, true) { + if !checkExistence(binding, name, true) { return info.Error("list: %q is no directory or does not exist", name) } - files, err := ioutil.ReadDir(name) + files, err := binding.GetState().FileSystem().ReadDir(name) if err != nil { return info.Error("list: %q: error reading directory", name, err) } diff --git a/dynaml/float.go b/dynaml/float.go new file mode 100644 index 0000000..5a9b85b --- /dev/null +++ b/dynaml/float.go @@ -0,0 +1,17 @@ +package dynaml + +import ( + "strconv" +) + +type FloatExpr struct { + Value float64 +} + +func (e FloatExpr) Evaluate(binding Binding, locally bool) (interface{}, EvaluationInfo, bool) { + return e.Value, DefaultInfo(), true +} + +func (e FloatExpr) String() string { + return strconv.FormatFloat(e.Value, 'g', -1, 64) +} diff --git a/dynaml/lookup.go b/dynaml/lookup.go index 92cb662..0864cca 100644 --- a/dynaml/lookup.go +++ b/dynaml/lookup.go @@ -1,14 +1,19 @@ package dynaml import ( + "github.com/mandelsoft/vfs/pkg/vfs" + "github.com/mandelsoft/spiff/yaml" - "os" "path/filepath" ) func func_lookup(directory bool, arguments []interface{}, binding Binding) (interface{}, EvaluationInfo, bool) { info := DefaultInfo() + if !binding.GetState().FileAccessAllowed() { + return info.DenyOSOperation("lookup") + } + paths := []string{} switch len(arguments) { @@ -48,7 +53,7 @@ func func_lookup(directory bool, arguments []interface{}, binding Binding) (inte result := []yaml.Node{} if filepath.IsAbs(name) { - if checkExistence(name, directory) { + if checkExistence(binding, name, directory) { result = append(result, NewNode(name, binding)) } return result, info, true @@ -57,7 +62,7 @@ func func_lookup(directory bool, arguments []interface{}, binding Binding) (inte for _, d := range paths { if d != "" { p := d + "/" + name - if checkExistence(p, directory) { + if checkExistence(binding, p, directory) { result = append(result, NewNode(p, binding)) } } @@ -65,9 +70,12 @@ func func_lookup(directory bool, arguments []interface{}, binding Binding) (inte return result, info, true } -func checkExistence(path string, directory bool) bool { - s, err := os.Stat(path) - if os.IsNotExist(err) || err != nil { +func checkExistence(binding Binding, path string, directory bool) bool { + if 
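The hunks above extend the dynaml State interface with FileAccessAllowed() and a vfs.VFS returned by FileSystem(), and the file-based functions (listFiles, lookup, ...) now check the former and route all I/O through the latter. A minimal sketch of that pattern, assuming the dynaml package context; func_isdir is purely illustrative and not part of this change:

func func_isdir(arguments []interface{}, binding Binding) (interface{}, EvaluationInfo, bool) {
	info := DefaultInfo()

	// deny the operation unless the execution environment permits file access
	if !binding.GetState().FileAccessAllowed() {
		return info.DenyOSOperation("isdir")
	}
	if len(arguments) != 1 {
		return info.Error("isdir requires exactly one argument")
	}
	name, ok := arguments[0].(string)
	if !ok {
		return info.Error("isdir: argument must be a string")
	}

	// all filesystem access goes through the injected virtual filesystem
	s, err := binding.GetState().FileSystem().Stat(name)
	if err != nil {
		return false, info, true
	}
	return s.IsDir(), info, true
}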
!binding.GetState().FileAccessAllowed() { + return false + } + s, err := binding.GetState().FileSystem().Stat(path) + if vfs.IsErrNotExist(err) || err != nil { return false } return s.IsDir() == directory diff --git a/dynaml/mkdir.go b/dynaml/mkdir.go index 7528571..2be9aab 100644 --- a/dynaml/mkdir.go +++ b/dynaml/mkdir.go @@ -15,6 +15,10 @@ func func_mkdir(arguments []interface{}, binding Binding) (interface{}, Evaluati var err error info := DefaultInfo() + if !binding.GetState().FileAccessAllowed() { + return info.DenyOSOperation("mkdir") + } + if len(arguments) < 1 || len(arguments) > 2 { return info.Error("mkdir requires one or two arguments") } @@ -55,7 +59,7 @@ func func_mkdir(arguments []interface{}, binding Binding) (interface{}, Evaluati } } - err = os.MkdirAll(path, os.FileMode(permissions)) + err = binding.GetState().FileSystem().MkdirAll(path, os.FileMode(permissions)) if err != nil { return info.Error("cannot create directory %q: %s", path, err) } diff --git a/dynaml/multiplication.go b/dynaml/multiplication.go index 8cc4bb6..d7f6d7a 100644 --- a/dynaml/multiplication.go +++ b/dynaml/multiplication.go @@ -18,7 +18,7 @@ func (e MultiplicationExpr) Evaluate(binding Binding, locally bool) (interface{} return nil, info, false } - bint, info, ok := ResolveIntegerExpressionOrPushEvaluation(&e.B, &resolved, &info, binding, false) + b, info, ok := ResolveExpressionOrPushEvaluation(&e.B, &resolved, &info, binding, false) if !ok { return nil, info, false } @@ -27,23 +27,32 @@ func (e MultiplicationExpr) Evaluate(binding Binding, locally bool) (interface{} return e, info, true } - aint, ok := a.(int64) - if ok { - return aint * bint, info, true - } - str, ok := a.(string) if ok { ip, cidr, err := net.ParseCIDR(str) if err != nil { - return info.Error("CIDR or int argument required for multiplication: %s", err) + return info.Error("first argument of multiplication must be CIDR or number: %s", err) } ones, _ := cidr.Mask.Size() size := int64(1 << (32 - uint32(ones))) + + bint, ok := b.(int64) + if !ok { + return info.Error("CIDR multiplication requires an integer argument") + } + ip = IPAdd(ip.Mask(cidr.Mask), size*bint) return (&net.IPNet{ip, cidr.Mask}).String(), info, true } - return info.Error("CIDR or int argument required as first argument for multiplication") + + a, b, err := NumberOperands(a, b) + if err != nil { + return info.Error("non-CIDR multiplication requires number arguments") + } + if _, ok := a.(int64); ok { + return a.(int64) * b.(int64), info, true + } + return a.(float64) * b.(float64), info, true } func (e MultiplicationExpr) String() string { diff --git a/dynaml/multiplication_test.go b/dynaml/multiplication_test.go index 3deb317..a86208c 100644 --- a/dynaml/multiplication_test.go +++ b/dynaml/multiplication_test.go @@ -47,4 +47,32 @@ var _ = Describe("multiplication", func() { Expect(expr).To(EvaluateAs("10.1.5.0/24", FakeBinding{})) }) }) + + Context("multiplaction", func() { + + It("multiplies floats", func() { + expr := MultiplicationExpr{ + FloatExpr{1.2}, + FloatExpr{2.2}, + } + + Expect(expr).To(EvaluateAs(2.64, FakeBinding{})) + }) + It("multiplies ints and floats", func() { + expr := MultiplicationExpr{ + IntegerExpr{2}, + FloatExpr{2.3}, + } + + Expect(expr).To(EvaluateAs(4.6, FakeBinding{})) + }) + It("multiplies floats and ints", func() { + expr := MultiplicationExpr{ + FloatExpr{2.3}, + IntegerExpr{2}, + } + + Expect(expr).To(EvaluateAs(4.6, FakeBinding{})) + }) + }) }) diff --git a/dynaml/parser.go b/dynaml/parser.go index 6d26b0e..f41d8f6 100644 --- 
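The multiplication rewrite above drops the int64-only branch in favour of NumberOperands, which (as used here) is assumed to coerce both operands to a common numeric type, either int64 or float64. The dispatch, condensed and detached from the CIDR branch; multiplyNumbers is a hypothetical helper in the dynaml package context:

func multiplyNumbers(a, b interface{}, info EvaluationInfo) (interface{}, EvaluationInfo, bool) {
	// NumberOperands normalizes mixed int64/float64 operands to a common type
	a, b, err := NumberOperands(a, b)
	if err != nil {
		return info.Error("non-CIDR multiplication requires number arguments")
	}
	if _, ok := a.(int64); ok {
		return a.(int64) * b.(int64), info, true // both operands are int64
	}
	return a.(float64) * b.(float64), info, true // otherwise both are float64
}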
a/dynaml/parser.go +++ b/dynaml/parser.go @@ -249,13 +249,22 @@ func buildExpression(grammar *DynamlGrammar, path []string, stubPath []string) ( expr := tokens.Pop() tokens.Push(ProjectionExpr{expr, value.(ProjectionValueExpr).Value, qual}) - case ruleInteger: - val, err := strconv.ParseInt(contents, 10, 64) - if err != nil { - panic(err) + case ruleNumber: + contents = strings.ReplaceAll(contents, "_", "") + if strings.ContainsAny(contents, ".eE") { + val, err := strconv.ParseFloat(contents, 64) + if err != nil { + panic(err) + } + tokens.Push(FloatExpr{val}) + } else { + val, err := strconv.ParseInt(contents, 10, 64) + if err != nil { + panic(err) + } + tokens.Push(IntegerExpr{val}) } - tokens.Push(IntegerExpr{val}) case ruleNil: tokens.Push(NilExpr{}) case ruleUndefined: diff --git a/dynaml/read.go b/dynaml/read.go index a892c66..a42887a 100644 --- a/dynaml/read.go +++ b/dynaml/read.go @@ -17,7 +17,7 @@ func func_read(cached bool, arguments []interface{}, binding Binding) (interface if len(arguments) > 2 { return info.Error("read takes a maximum of two arguments") } - if !binding.GetState().OSAccessAllowed() { + if !binding.GetState().FileAccessAllowed() { return info.DenyOSOperation("read") } diff --git a/dynaml/subtraction.go b/dynaml/subtraction.go index ae4541f..4043aba 100644 --- a/dynaml/subtraction.go +++ b/dynaml/subtraction.go @@ -27,19 +27,11 @@ func (e SubtractionExpr) Evaluate(binding Binding, locally bool) (interface{}, E return e, info, true } - aint, ok := a.(int64) - bint, bok := b.(int64) - if ok { - if !bok { - return info.Error("integer operand required") - } - return aint - bint, info, true - } - str, ok := a.(string) if ok { ip := net.ParseIP(str) if ip != nil { + bint, bok := b.(int64) if bok { return IPAdd(ip, -bint).String(), info, true } @@ -52,13 +44,21 @@ func (e SubtractionExpr) Evaluate(binding Binding, locally bool) (interface{}, E } return DiffIP(ip, ipb), info, true } - return info.Error("string argument for MINUS must be an IP address") + return info.Error("second argument of IP address subtraction must be IP address or integer") } - return info.Error("second argument of MINUS must be IP address or integer") + return info.Error("second argument of IP address subtraction must be IP address or integer") } return info.Error("string argument for MINUS must be an IP address") } - return info.Error("first argument of MINUS must be IP address or integer") + + a, b, err := NumberOperands(a, b) + if err != nil { + return info.Error("non-IP address subtration requires number arguments") + } + if _, ok := a.(int64); ok { + return a.(int64) - b.(int64), info, true + } + return a.(float64) - b.(float64), info, true } func (e SubtractionExpr) String() string { diff --git a/dynaml/subtraction_test.go b/dynaml/subtraction_test.go index 48d28c8..cb062e4 100644 --- a/dynaml/subtraction_test.go +++ b/dynaml/subtraction_test.go @@ -56,4 +56,31 @@ var _ = Describe("subtraction", func() { Expect(expr).To(EvaluateAs("10.10.9.9", FakeBinding{})) }) }) + + Context("floats", func() { + It("subtracts floats", func() { + expr := SubtractionExpr{ + FloatExpr{1.25}, + FloatExpr{2.125}, + } + + Expect(expr).To(EvaluateAs(-0.875, FakeBinding{})) + }) + It("subtracts ints and floats", func() { + expr := SubtractionExpr{ + IntegerExpr{1}, + FloatExpr{2.25}, + } + + Expect(expr).To(EvaluateAs(-1.25, FakeBinding{})) + }) + It("subtracts floats and ints", func() { + expr := SubtractionExpr{ + FloatExpr{2.25}, + IntegerExpr{1}, + } + + Expect(expr).To(EvaluateAs(1.25, FakeBinding{})) + }) + 
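The parser change above replaces ruleInteger with ruleNumber: underscores serve as digit separators and a '.', 'e' or 'E' turns the literal into a FloatExpr instead of an IntegerExpr. The same decision logic as a standalone, hypothetical helper:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNumberLiteral mirrors the ruleNumber handling in buildExpression:
// strip '_' separators, then choose float or integer parsing.
func parseNumberLiteral(contents string) (interface{}, error) {
	contents = strings.ReplaceAll(contents, "_", "")
	if strings.ContainsAny(contents, ".eE") {
		return strconv.ParseFloat(contents, 64)
	}
	return strconv.ParseInt(contents, 10, 64)
}

func main() {
	fmt.Println(parseNumberLiteral("1_000_000")) // 1000000 <nil>
	fmt.Println(parseNumberLiteral("1.1e-1"))    // 0.11 <nil>
}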
}) }) diff --git a/dynaml/tempfile.go b/dynaml/tempfile.go index f9636a3..eca0683 100644 --- a/dynaml/tempfile.go +++ b/dynaml/tempfile.go @@ -1,7 +1,6 @@ package dynaml import ( - "io/ioutil" "os" ) @@ -10,6 +9,10 @@ func func_tempfile(arguments []interface{}, binding Binding) (interface{}, Evalu info := DefaultInfo() + if !binding.GetState().FileAccessAllowed() { + return info.DenyOSOperation("tempfile") + } + if len(arguments) < 1 || len(arguments) > 2 { return info.Error("temp_file requires exactly one or two arguments") } @@ -34,7 +37,7 @@ func func_tempfile(arguments []interface{}, binding Binding) (interface{}, Evalu return info.Error("cannot create temporary file: %s", err) } - err = ioutil.WriteFile(name, []byte(data), os.FileMode(permissions)) + err = binding.GetState().FileSystem().WriteFile(name, []byte(data), os.FileMode(permissions)) if err != nil { return info.Error("cannot write file: %s", err) } diff --git a/dynaml/write.go b/dynaml/write.go index aca98e5..8f5d939 100644 --- a/dynaml/write.go +++ b/dynaml/write.go @@ -3,7 +3,6 @@ package dynaml import ( "encoding/base64" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -16,7 +15,7 @@ func func_write(arguments []interface{}, binding Binding) (interface{}, Evaluati if len(arguments) < 2 || len(arguments) > 3 { return info.Error("write requires two or three arguments") } - if !binding.GetState().OSAccessAllowed() { + if !binding.GetState().FileAccessAllowed() { return info.DenyOSOperation("write") } file, _, ok := getArg(0, arguments[0], false) @@ -42,7 +41,7 @@ func func_write(arguments []interface{}, binding Binding) (interface{}, Evaluati file, raw, data, _ := getData(file, binary, 1, arguments[1], true) - err = ioutil.WriteFile(file, data, os.FileMode(permissions)) + err = binding.GetState().FileSystem().WriteFile(file, data, os.FileMode(permissions)) if err != nil { return info.Error("cannot write file: %s", err) } diff --git a/examples/spifflib/lib-example.go b/examples/spifflib/lib-example.go new file mode 100644 index 0000000..172309b --- /dev/null +++ b/examples/spifflib/lib-example.go @@ -0,0 +1,86 @@ +package main + +import ( + "fmt" + "math" + "os" + + "github.com/mandelsoft/spiff/dynaml" + "github.com/mandelsoft/spiff/spiffing" +) + +func func_pow(arguments []interface{}, binding dynaml.Binding) (interface{}, dynaml.EvaluationInfo, bool) { + info := dynaml.DefaultInfo() + + if len(arguments) != 2 { + return info.Error("pow takes 2 arguments") + } + + args, wasInt, err := dynaml.NumberOperandsN(dynaml.TYPE_FLOAT, arguments...) 
+ + if err != nil { + return info.Error("%s", err) + } + r := math.Pow(args[0].(float64), args[1].(float64)) + if wasInt && float64(int64(r)) == r { + return int64(r), info, true + } + return r, info, true +} + +var state = ` +state: {} +` +var stub = ` +unused: (( input )) +ages: + alice: (( pow(2,5) )) + bob: (( alice + 1 )) +` + +var template = ` +state: + <<<: (( &state )) + random: (( rand("[:alnum:]", 10) )) +ages: (( &temporary )) + +example: + name: (( input )) # direct reference to additional values + sum: (( sum[ages|0|s,k,v|->s + v] )) + int: (( pow(2,4) )) + pow: (( pow(1.1e-1,2) )) +` + +func Error(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + } +} + +func main() { + values := map[string]interface{}{} + values["input"] = "this is an input" + + functions := spiffing.NewFunctions() + functions.RegisterFunction("pow", func_pow) + + spiff, err := spiffing.New().WithFunctions(functions).WithValues(values) + Error(err) + pstate, err := spiff.Unmarshal("state", []byte(state)) + Error(err) + pstub, err := spiff.Unmarshal("stub", []byte(stub)) + Error(err) + ptempl, err := spiff.Unmarshal("template", []byte(template)) + Error(err) + result, err := spiff.Cascade(ptempl, []spiffing.Node{pstub}, pstate) + Error(err) + b, err := spiff.Marshal(result) + Error(err) + newstate, err := spiff.Marshal(spiff.DetermineState(result)) + Error(err) + fmt.Printf("==== new state ===\n") + fmt.Printf("%s\n", string(newstate)) + fmt.Printf("==== result ===\n") + fmt.Printf("%s\n", string(b)) +} diff --git a/flow/cascade.go b/flow/cascade.go index 9d4daaa..0eb2bed 100644 --- a/flow/cascade.go +++ b/flow/cascade.go @@ -5,10 +5,14 @@ import ( "github.com/mandelsoft/spiff/yaml" ) +// Options bundles the options for processing yaml templates type Options struct { - PreserveEscapes bool - PreserveTemporaray bool - Partial bool + // PreserveEscapes prevents escaped dynaml expressions to be unescaped for the final output + PreserveEscapes bool + // PreserveTemporary will keep temporary elements in the final output + PreserveTemporary bool + // Partial will not treat unevaluated dynaml expressions as error, but keep it in the output. + Partial bool } func PrepareStubs(outer dynaml.Binding, partial bool, stubs ...yaml.Node) ([]yaml.Node, error) { @@ -26,7 +30,7 @@ func PrepareStubs(outer dynaml.Binding, partial bool, stubs ...yaml.Node) ([]yam func Apply(outer dynaml.Binding, template yaml.Node, prepared []yaml.Node, opts Options) (yaml.Node, error) { result, err := NestedFlow(outer, template, prepared...) if err == nil { - if !opts.PreserveTemporaray { + if !opts.PreserveTemporary { result = Cleanup(result, discardTemporary) } if !opts.PreserveEscapes { @@ -102,3 +106,7 @@ func Cleanup(node yaml.Node, test CleanupFunction) yaml.Node { } return yaml.ReplaceValue(value, node) } + +func DetermineState(node yaml.Node) yaml.Node { + return Cleanup(node, DiscardNonState) +} diff --git a/flow/environment.go b/flow/environment.go index 487196f..3607ac0 100644 --- a/flow/environment.go +++ b/flow/environment.go @@ -278,12 +278,19 @@ func (e DefaultEnvironment) Cascade(outer dynaml.Binding, template yaml.Node, pa return Cascade(outer, template, Options{Partial: partial}, templates...) 
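For reference, the renamed and documented Options above can be combined with Apply and the new DetermineState helper roughly as sketched below; applyWithOptions is a hypothetical wrapper, not part of the change:

package example

import (
	"github.com/mandelsoft/spiff/dynaml"
	"github.com/mandelsoft/spiff/flow"
	"github.com/mandelsoft/spiff/yaml"
)

// applyWithOptions processes a template with already prepared stubs, keeping
// temporary nodes and unresolved expressions, and additionally extracts the
// state document from the result.
func applyWithOptions(binding dynaml.Binding, template yaml.Node, prepared []yaml.Node) (yaml.Node, yaml.Node, error) {
	opts := flow.Options{
		PreserveTemporary: true, // keep temporary elements in the final output
		Partial:           true, // keep unresolved dynaml expressions instead of failing
	}
	result, err := flow.Apply(binding, template, prepared, opts)
	if err != nil {
		return nil, nil, err
	}
	// DetermineState reduces the result to the intended new state representation
	return result, flow.DetermineState(result), nil
}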
} -func NewEnvironment(stubs []yaml.Node, source string) dynaml.Binding { - return NewNestedEnvironment(stubs, source, nil) +func NewEnvironment(stubs []yaml.Node, source string, optstate ...*State) dynaml.Binding { + var state *State + if len(optstate) > 0 { + state = optstate[0] + } + if state == nil { + state = NewState(os.Getenv("SPIFF_ENCRYPTION_KEY"), MODE_OS_ACCESS|MODE_FILE_ACCESS) + } + return DefaultEnvironment{state: state, stubs: stubs, sourceName: source, currentSourceName: source, outer: nil, active: true} } func NewProcessLocalEnvironment(stubs []yaml.Node, source string) dynaml.Binding { - state := NewState(os.Getenv("SPIFF_ENCRYPTION_KEY"), false) + state := NewState(os.Getenv("SPIFF_ENCRYPTION_KEY"), 0) return DefaultEnvironment{state: state, stubs: stubs, sourceName: source, currentSourceName: source, outer: nil, active: true} } @@ -297,7 +304,7 @@ func CleanupEnvironment(binding dynaml.Binding) { func NewNestedEnvironment(stubs []yaml.Node, source string, outer dynaml.Binding) dynaml.Binding { var state *State if outer == nil { - state = NewState(os.Getenv("SPIFF_ENCRYPTION_KEY"), true) + state = NewState(os.Getenv("SPIFF_ENCRYPTION_KEY"), MODE_OS_ACCESS|MODE_FILE_ACCESS) } return DefaultEnvironment{state: state, stubs: stubs, sourceName: source, currentSourceName: source, outer: outer, active: true} } diff --git a/flow/error_check_test.go b/flow/error_check_test.go index 8b4f4d6..a8923de 100644 --- a/flow/error_check_test.go +++ b/flow/error_check_test.go @@ -24,7 +24,7 @@ a: true node: (( a + 1 )) `) Expect(source).To(FlowToErr( - ` (( a + 1 )) in test node () *first argument of PLUS must be IP address or integer`, + ` (( a + 1 )) in test node () *non-IP address addition requires number arguments`, )) }) @@ -35,7 +35,7 @@ a: true node: (( a - 1 )) `) Expect(source).To(FlowToErr( - ` (( a - 1 )) in test node () *first argument of MINUS must be IP address or integer`, + ` (( a - 1 )) in test node () *non-IP address subtration requires number arguments`, )) }) @@ -57,7 +57,7 @@ a: 1 node: (( a / true )) `) Expect(source).To(FlowToErr( - ` (( a / true )) in test node () *integer operand required`, + ` (( a / true )) in test node () *non-CIDR division requires number arguments`, )) }) diff --git a/flow/state.go b/flow/state.go index 22c6826..86a9508 100644 --- a/flow/state.go +++ b/flow/state.go @@ -4,27 +4,70 @@ import ( "crypto/sha512" "encoding/base64" "fmt" - "github.com/mandelsoft/spiff/debug" "io/ioutil" "net/http" - "os" "path" "strings" + + "github.com/mandelsoft/vfs/pkg/osfs" + "github.com/mandelsoft/vfs/pkg/vfs" + + "github.com/mandelsoft/spiff/debug" + "github.com/mandelsoft/spiff/dynaml" ) +const MODE_FILE_ACCESS = 1 // support file system access +const MODE_OS_ACCESS = 2 // support os commands like pipe and exec + type State struct { - files map[string]string // content hash to temp file name - fileCache map[string][]byte // file content cache - key string // default encryption key - osaccess bool // allow OS access + files map[string]string // content hash to temp file name + fileCache map[string][]byte // file content cache + key string // default encryption key + mode int + fileSystem vfs.VFS // virtual filesystem to use for filesystem based operations + functions dynaml.Registry +} + +var _ dynaml.State = &State{} + +func NewState(key string, mode int, optfs ...vfs.FileSystem) *State { + var fs vfs.FileSystem + if len(optfs) > 0 { + fs = optfs[0] + } + if fs == nil { + fs = osfs.New() + } else { + mode = mode & ^MODE_OS_ACCESS + } + return &State{ + files: 
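The former boolean osaccess flag becomes a bit mask here. A small sketch of the intended combinations, using only the signatures introduced above; newSandboxedBinding is hypothetical:

package example

import (
	"os"

	"github.com/mandelsoft/vfs/pkg/osfs"

	"github.com/mandelsoft/spiff/dynaml"
	"github.com/mandelsoft/spiff/flow"
	"github.com/mandelsoft/spiff/yaml"
)

// newSandboxedBinding allows file access through the given virtual filesystem
// while exec/pipe stay denied; NewState additionally strips MODE_OS_ACCESS
// whenever an explicit filesystem is passed.
func newSandboxedBinding(stubs []yaml.Node) dynaml.Binding {
	state := flow.NewState(os.Getenv("SPIFF_ENCRYPTION_KEY"), flow.MODE_FILE_ACCESS, osfs.New())
	return flow.NewEnvironment(stubs, "sandbox", state)
}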
map[string]string{}, + fileCache: map[string][]byte{}, + key: key, + mode: mode, + fileSystem: vfs.New(fs), + } } -func NewState(key string, osaccess bool) *State { - return &State{map[string]string{}, map[string][]byte{}, key, osaccess} +func (s *State) SetFunctions(f dynaml.Registry) *State { + s.functions = f + return s } func (s *State) OSAccessAllowed() bool { - return s.osaccess + return s.mode&MODE_OS_ACCESS != 0 +} + +func (s *State) FileAccessAllowed() bool { + return s.mode&MODE_FILE_ACCESS != 0 +} + +func (s *State) FileSystem() vfs.VFS { + return s.fileSystem +} + +func (s *State) GetFunctions() dynaml.Registry { + return s.functions } func (s *State) GetEncryptionKey() string { @@ -32,12 +75,15 @@ func (s *State) GetEncryptionKey() string { } func (s *State) GetTempName(data []byte) (string, error) { + if !s.FileAccessAllowed() { + return "", fmt.Errorf("tempname: no OS operations supported in this execution environment") + } sum := sha512.Sum512(data) hash := base64.StdEncoding.EncodeToString(sum[:]) name, ok := s.files[hash] if !ok { - file, err := ioutil.TempFile("", "spiff-") + file, err := s.fileSystem.TempFile("", "spiff-") if err != nil { return "", err } @@ -49,7 +95,7 @@ func (s *State) GetTempName(data []byte) (string, error) { func (s *State) Cleanup() { for _, n := range s.files { - os.Remove(n) + s.fileSystem.Remove(n) } s.files = map[string]string{} } @@ -73,7 +119,7 @@ func (s *State) GetFileContent(file string, cached bool) ([]byte, error) { data = contents } } else { - data, err = ioutil.ReadFile(file) + data, err = s.fileSystem.ReadFile(file) if err != nil { return nil, fmt.Errorf("error reading [%s]: %s", path.Clean(file), err) } diff --git a/spiffing/example_test.go b/spiffing/example_test.go new file mode 100644 index 0000000..3217017 --- /dev/null +++ b/spiffing/example_test.go @@ -0,0 +1,85 @@ +package spiffing + +import ( + "fmt" + "math" + "os" + + "github.com/mandelsoft/spiff/dynaml" +) + +func func_pow(arguments []interface{}, binding dynaml.Binding) (interface{}, dynaml.EvaluationInfo, bool) { + info := dynaml.DefaultInfo() + + if len(arguments) != 2 { + return info.Error("pow takes 2 arguments") + } + + args, wasInt, err := dynaml.NumberOperandsN(dynaml.TYPE_FLOAT, arguments...) 
+ + if err != nil { + return info.Error("%s", err) + } + r := math.Pow(args[0].(float64), args[1].(float64)) + if wasInt && float64(int64(r)) == r { + return int64(r), info, true + } + return r, info, true +} + +var state = ` +state: {} +` +var stub = ` +unused: (( input )) +ages: + alice: (( pow(2,5) )) + bob: (( alice + 1 )) +` + +var template = ` +state: + <<<: (( &state )) + random: (( rand("[:alnum:]", 10) )) +ages: (( &temporary )) + +example: + name: (( input )) # direct reference to additional values + sum: (( sum[ages|0|s,k,v|->s + v] )) + int: (( pow(2,4) )) + pow: (( pow(1.1e-1,2) )) +` + +func Error(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + } +} + +func Example() { + values := map[string]interface{}{} + values["input"] = "this is an input" + + functions := NewFunctions() + functions.RegisterFunction("pow", func_pow) + + spiff, err := New().WithFunctions(functions).WithValues(values) + Error(err) + pstate, err := spiff.Unmarshal("state", []byte(state)) + Error(err) + pstub, err := spiff.Unmarshal("stub", []byte(stub)) + Error(err) + ptempl, err := spiff.Unmarshal("template", []byte(template)) + Error(err) + result, err := spiff.Cascade(ptempl, []Node{pstub}, pstate) + Error(err) + b, err := spiff.Marshal(result) + Error(err) + newstate, err := spiff.Marshal(spiff.DetermineState(result)) + Error(err) + fmt.Printf("==== new state ===\n") + fmt.Printf("%s\n", string(newstate)) + fmt.Printf("==== result ===\n") + fmt.Printf("%s\n", string(b)) +} diff --git a/spiffing/examples_test.go b/spiffing/examples_test.go new file mode 100644 index 0000000..f38b3c8 --- /dev/null +++ b/spiffing/examples_test.go @@ -0,0 +1,30 @@ +package spiffing + +import ( + "fmt" +) + +func ExampleEvaluateDynamlExpression() { + ctx, _ := New().WithValues(map[string]interface{}{ + "values": map[string]interface{}{ + "alice": 25, + "bob": 26, + }, + }) + result, _ := EvaluateDynamlExpression(ctx, "values.alice + values.bob") + fmt.Printf("%s", result) + // Output: 51 +} + +func ExampleEvaluateDynamlExpression_complex_data() { + ctx, _ := New().WithValues(map[string]interface{}{ + "values": map[string]interface{}{ + "alice": 25, + "bob": 26, + }, + }) + result, _ := EvaluateDynamlExpression(ctx, "[values.alice, values.bob]") + fmt.Printf("%s", result) + // Output: - 25 + //- 26 +} diff --git a/spiffing/init_test.go b/spiffing/init_test.go new file mode 100644 index 0000000..922f50a --- /dev/null +++ b/spiffing/init_test.go @@ -0,0 +1,13 @@ +package spiffing + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Spiffing") +} diff --git a/spiffing/interface.go b/spiffing/interface.go new file mode 100644 index 0000000..9924202 --- /dev/null +++ b/spiffing/interface.go @@ -0,0 +1,100 @@ +package spiffing + +import ( + "github.com/mandelsoft/vfs/pkg/vfs" + + "github.com/mandelsoft/spiff/dynaml" + "github.com/mandelsoft/spiff/flow" + "github.com/mandelsoft/spiff/yaml" +) + +// MODE_OS_ACCESS allows os command execution (pipe, exec) +const MODE_OS_ACCESS = flow.MODE_OS_ACCESS + +// MODE_FILE_ACCESS allows file access to virtual filesystem +const MODE_FILE_ACCESS = flow.MODE_FILE_ACCESS + +// MODE_DEFAULT (default) enables all os related spiff functions +const MODE_DEFAULT = MODE_OS_ACCESS | MODE_FILE_ACCESS + +// Node is a document node of the processing representation of a document +type Node = yaml.Node + +// Options described the processing options +type Options = flow.Options + +// Functions provides access to a set of spiff functions used to extend +// the standrd function set +type Functions = dynaml.Registry + +// Spiff is a configuration end execution context for +// executing spiff operations +type Spiff interface { + // WithEncryptionKey creates a new context with + // dedicated encryption key used for the spiff encryption feature + WithEncryptionKey(key string) Spiff + // WithMode creates a new context with the given processing mode. + // (see MODE constants) + WithMode(mode int) Spiff + // WithFileSystem creates a new context with the given + // virtual filesystem used for filesystem functions during + // prcessing. Setting a filesystem disables the command + // execution functions. + WithFileSystem(fs vfs.FileSystem) Spiff + // WithFunctions creates a new context with the given + // additional function definitions + WithFunctions(functions Functions) Spiff + // WithValues creates a new context with the given + // additional structured values usable by path expressions + // during processing. + // It is highly recommended to decide for a common root + // value (like `values`) to minimize the blocked root + // elements in the processed documents. + WithValues(values map[string]interface{}) (Spiff, error) + + // FileSystem return the virtual filesystem set for the execution context. + FileSystem() vfs.FileSystem + // FileSource create a new file source based on the configured file system. + FileSource(path string) Source + + // Unmarshal parses a single document yaml representation and + // returns the internal representation + Unmarshal(name string, source []byte) (Node, error) + // Unmarshal parses a single source and + // returns the internal representation + UnmarshalSource(source Source) (Node, error) + // UnmarshalMulti parses a multi document yaml representation and + // returns the list of documents in the internal representation + UnmarshalMulti(name string, source []byte) ([]Node, error) + // UnmarshalMultiSource parses a multi document source and + // returns the list of documents in the internal representation + UnmarshalMultiSource(source Source) ([]Node, error) + // Marshal transform the internal node representation into a + // yaml representation + Marshal(node Node) ([]byte, error) + // DetermineState extracts the intended new state representation from + // a processing result. 
+	DetermineState(node Node) Node
+	// Normalize transforms the node representation to a regular go value representation
+	// consisting of `map[string]interface{}`, `[]interface{}`, `string`, `bool`,
+	// `int64`, `float64` and `[]byte` objects
+	Normalize(node Node) (interface{}, error)
+
+	// Cascade processes a template with a list of given stubs and state
+	// documents
+	Cascade(template Node, stubs []Node, states ...Node) (Node, error)
+	// PrepareStubs processes a list of stubs and returns a prepared
+	// representation usable to process a template
+	PrepareStubs(stubs ...Node) ([]Node, error)
+	// ApplyStubs uses already prepared stubs to process a template.
+	ApplyStubs(template Node, preparedstubs []Node) (Node, error)
+}
+
+// Source is used to get access to a template or stub source data and name
+type Source interface {
+	// Name returns the name of the source.
+	// For file based sources this should be the path name of the file.
+	Name() string
+	// Data returns the yaml representation of the source document
+	Data() ([]byte, error)
+}
diff --git a/spiffing/spiff.go b/spiffing/spiff.go
new file mode 100644
index 0000000..19a6a4e
--- /dev/null
+++ b/spiffing/spiff.go
@@ -0,0 +1,231 @@
+// Package spiffing is a wrapper for internal spiff functionality
+// distributed over multiple packages to offer a coherent interface
+// for using spiff as a Go library
+
+package spiffing
+
+import (
+	"os"
+
+	"github.com/mandelsoft/vfs/pkg/osfs"
+	"github.com/mandelsoft/vfs/pkg/vfs"
+
+	"github.com/mandelsoft/spiff/dynaml"
+	"github.com/mandelsoft/spiff/flow"
+	"github.com/mandelsoft/spiff/yaml"
+)
+
+type sourceBase struct {
+	name string
+}
+
+func (s *sourceBase) Name() string {
+	return s.name
+}
+
+type sourceFile struct {
+	sourceBase
+	fs vfs.FileSystem
+}
+
+// NewSourceFile returns a source based on a file in a virtual filesystem.
+// If no filesystem is given, the OS filesystem is used by default.
+func NewSourceFile(path string, optfs ...vfs.FileSystem) Source {
+	var fs vfs.FileSystem
+	if len(optfs) > 0 {
+		fs = optfs[0]
+	}
+	if fs == nil {
+		fs = osfs.New()
+	}
+	return &sourceFile{sourceBase{path}, fs}
+}
+
+func (s *sourceFile) Data() ([]byte, error) {
+	return vfs.ReadFile(s.fs, s.name)
+}
+
+type sourceData struct {
+	sourceBase
+	data []byte
+}
+
+// NewSourceData creates a source based on yaml data
+func NewSourceData(name string, data []byte) Source {
+	return &sourceData{sourceBase{name}, data}
+}
+
+func (s *sourceData) Data() ([]byte, error) {
+	return s.data, nil
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+type spiff struct {
+	key       string
+	mode      int
+	fs        vfs.FileSystem
+	opts      flow.Options
+	values    map[string]yaml.Node
+	functions Functions
+
+	binding dynaml.Binding
+}
+
+// NewFunctions provides a new registry for additional spiff functions
+func NewFunctions() Functions {
+	return dynaml.NewRegistry()
+}
+
+// New creates a new default spiff context.
+func New() Spiff {
+	return &spiff{
+		key:  os.Getenv("SPIFF_ENCRYPTION_KEY"),
+		mode: MODE_DEFAULT,
+	}
+}
+
+func (s *spiff) reset() Spiff {
+	s.binding = nil
+	return s
+}
+
+// WithEncryptionKey creates a new context with a
+// dedicated encryption key used for the spiff encryption feature
+func (s spiff) WithEncryptionKey(key string) Spiff {
+	s.key = key
+	return s.reset()
+}
+
+// WithMode creates a new context with the given processing mode.
+// (see MODE constants)
+func (s spiff) WithMode(mode int) Spiff {
+	if s.fs != nil {
+		mode = mode & ^MODE_OS_ACCESS
+	}
+	s.mode = mode
+	return s.reset()
+}
+
+// WithFileSystem creates a new context with the given
+// virtual filesystem used for filesystem functions during
+// processing. Setting a filesystem disables the command
+// execution functions.
+func (s spiff) WithFileSystem(fs vfs.FileSystem) Spiff {
+	s.fs = fs
+	if fs != nil {
+		s.mode = s.mode & ^MODE_OS_ACCESS
+	}
+	return s.reset()
+}
+
+// WithFunctions creates a new context with the given
+// additional function definitions
+func (s spiff) WithFunctions(functions Functions) Spiff {
+	s.functions = functions
+	return s.reset()
+}
+
+// WithValues creates a new context with the given
+// additional structured values usable by path expressions
+// during processing.
+// It is highly recommended to choose a common root
+// value (like `values`) to minimize the blocked root
+// elements in the processed documents.
+func (s spiff) WithValues(values map[string]interface{}) (Spiff, error) {
+	if values != nil {
+		nodes, err := yaml.Sanitize("values", values)
+		if err != nil {
+			return nil, err
+		}
+		s.values = nodes.Value().(map[string]yaml.Node)
+	} else {
+		s.values = nil
+	}
+	return s.reset(), nil
+}
+
+// FileSystem returns the virtual filesystem set for the execution context.
+func (s *spiff) FileSystem() vfs.FileSystem {
+	return s.fs
+}
+
+// FileSource creates a new file source based on the configured file system.
+func (s *spiff) FileSource(path string) Source {
+	return NewSourceFile(path, s.fs)
+}
+
+// Cascade processes a template with a list of given stubs and state
+// documents
+func (s *spiff) Cascade(template Node, stubs []Node, states ...Node) (Node, error) {
+	if s.binding == nil {
+		s.binding = flow.NewEnvironment(
+			nil, "context", flow.NewState(s.key, s.mode, s.fs).SetFunctions(s.functions))
+		if s.values != nil {
+			s.binding = s.binding.WithLocalScope(s.values)
+		}
+	}
+	return flow.Cascade(s.binding, template, s.opts, append(stubs, states...)...)
+}
+
+// PrepareStubs processes a list of stubs and returns a prepared
+// representation usable to process a template
+func (s *spiff) PrepareStubs(stubs ...Node) ([]Node, error) {
+	return flow.PrepareStubs(s.binding, s.opts.Partial, stubs...)
+}
+
+// ApplyStubs uses already prepared stubs to process a template.
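+// An illustrative sketch (assuming a context `ctx`, parsed stub nodes `stubs`
+// and two parsed template nodes `templA` and `templB`): the stubs are prepared
+// once and applied to both templates:
+//
+//	prepared, err := ctx.PrepareStubs(stubs...)
+//	if err == nil {
+//		resultA, _ := ctx.ApplyStubs(templA, prepared)
+//		resultB, _ := ctx.ApplyStubs(templB, prepared)
+//		dataA, _ := ctx.Marshal(resultA)
+//		dataB, _ := ctx.Marshal(resultB)
+//		fmt.Printf("%s%s", dataA, dataB)
+//	}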
+func (s *spiff) ApplyStubs(template Node, preparedstubs []Node) (Node, error) {
+	return flow.Apply(s.binding, template, preparedstubs, s.opts)
+}
+
+// Unmarshal parses a single document yaml representation and
+// returns the internal representation
+func (s *spiff) Unmarshal(name string, source []byte) (Node, error) {
+	return yaml.Unmarshal(name, source)
+}
+
+// UnmarshalSource parses a single source and
+// returns the internal representation
+func (s *spiff) UnmarshalSource(source Source) (Node, error) {
+	data, err := source.Data()
+	if err != nil {
+		return nil, err
+	}
+	return yaml.Unmarshal(source.Name(), data)
+}
+
+// UnmarshalMulti parses a multi document yaml representation and
+// returns the list of documents in the internal representation
+func (s *spiff) UnmarshalMulti(name string, source []byte) ([]Node, error) {
+	return yaml.UnmarshalMulti(name, source)
+}
+
+// UnmarshalMultiSource parses a multi document source and
+// returns the list of documents in the internal representation
+func (s *spiff) UnmarshalMultiSource(source Source) ([]Node, error) {
+	data, err := source.Data()
+	if err != nil {
+		return nil, err
+	}
+	return yaml.UnmarshalMulti(source.Name(), data)
+}
+
+// DetermineState extracts the intended new state representation from
+// a processing result.
+func (s *spiff) DetermineState(node Node) Node {
+	return flow.DetermineState(node)
+}
+
+// Marshal transforms the internal node representation into a
+// yaml representation
+func (s *spiff) Marshal(node Node) ([]byte, error) {
+	return yaml.Marshal(node)
+}
+
+// Normalize transforms the node representation to a regular go value representation
+// consisting of `map[string]interface{}`, `[]interface{}`, `string`, `bool`,
+// `int64`, `float64` and `[]byte` objects
+func (s *spiff) Normalize(node Node) (interface{}, error) {
+	return yaml.Normalize(node)
+}
diff --git a/spiffing/spiffing_test.go b/spiffing/spiffing_test.go
new file mode 100644
index 0000000..d66ba96
--- /dev/null
+++ b/spiffing/spiffing_test.go
@@ -0,0 +1,40 @@
+package spiffing
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Spiffing", func() {
+
+	Context("Simple processing", func() {
+		ctx, err := New().WithValues(map[string]interface{}{
+			"values": map[string]interface{}{
+				"alice": 25,
+				"bob":   26,
+			},
+		})
+		Expect(err).To(Succeed())
+
+		It("Handles value document", func() {
+			templ, err := ctx.Unmarshal("test", []byte("(( values.alice + values.bob ))"))
+			Expect(err).To(Succeed())
+			result, err := ctx.Cascade(templ, nil)
+			Expect(err).To(Succeed())
+			data, err := ctx.Marshal(result)
+			Expect(err).To(Succeed())
+			Expect(string(data)).To(Equal("51\n"))
+		})
+		It("Handles dynaml expression", func() {
+			result, err := EvaluateDynamlExpression(ctx, "values.alice + values.bob")
+			Expect(err).To(Succeed())
+			Expect(string(result)).To(Equal("51"))
+		})
+
+		It("Handles complex dynaml expression", func() {
+			result, err := EvaluateDynamlExpression(ctx, "[values.alice, values.bob]")
+			Expect(err).To(Succeed())
+			Expect(string(result)).To(Equal("- 25\n- 26\n"))
+		})
+	})
+})
diff --git a/spiffing/utils.go b/spiffing/utils.go
new file mode 100644
index 0000000..744b995
--- /dev/null
+++ b/spiffing/utils.go
@@ -0,0 +1,84 @@
+package spiffing
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Process just processes a template with the values set in the execution
+// context. It directly takes and delivers byte arrays containing yaml data.
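+// A minimal illustrative sketch (the value structure and template content are
+// assumptions chosen for the example):
+//
+//	ctx, _ := New().WithValues(map[string]interface{}{
+//		"values": map[string]interface{}{"name": "alice"},
+//	})
+//	out, _ := Process(ctx, NewSourceData("template", []byte("greeting: (( values.name ))")))
+//	fmt.Printf("%s", out) // expected to yield: greeting: alice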
+func Process(s Spiff, template Source) ([]byte, error) {
+	templ, err := s.UnmarshalSource(template)
+	if err != nil {
+		return nil, err
+	}
+	result, err := s.Cascade(templ, nil)
+	if err != nil {
+		return nil, err
+	}
+	return s.Marshal(result)
+}
+
+// ProcessFile just processes a template given by a file with the values set in
+// the execution context.
+// The path name of the file is interpreted in the context of the filesystem
+// found in the execution context, which defaults to the OS filesystem.
+func ProcessFile(s Spiff, path string) ([]byte, error) {
+	return Process(s, s.FileSource(path))
+}
+
+// EvaluateDynamlExpression just processes a plain dynaml expression with the values set in
+// the execution context.
+func EvaluateDynamlExpression(s Spiff, expr string) ([]byte, error) {
+	r, err := Process(s, NewSourceData("dynaml", []byte("(( "+expr+" ))")))
+	if err != nil {
+		return nil, err
+	}
+	lines := strings.Split(string(r), "\n")
+	if len(lines) == 2 && lines[1] == "" {
+		return []byte(lines[0]), nil
+	}
+	return r, nil
+}
+
+// Cascade processes a template source with a list of stub sources and optional state and
+// delivers the cascading result and the new state as yaml data
+func Cascade(s Spiff, template Source, stubs []Source, optstate ...Source) ([]byte, []byte, error) {
+	var nstubs []Node
+
+	for i, src := range stubs {
+		stub, err := s.UnmarshalSource(src)
+		if err != nil {
+			return nil, nil, fmt.Errorf("stub %d [%s] failed: %s", i+1, src.Name(), err)
+		}
+		nstubs = append(nstubs, stub)
+	}
+	for i, src := range optstate {
+		stub, err := s.UnmarshalSource(src)
+		if err != nil {
+			return nil, nil, fmt.Errorf("state %d [%s] failed: %s", i+1, src.Name(), err)
+		}
+		nstubs = append(nstubs, stub)
+	}
+	node, err := s.UnmarshalSource(template)
+	if err != nil {
+		return nil, nil, fmt.Errorf("template [%s] failed: %s", template.Name(), err)
+	}
+	result, err := s.Cascade(node, nstubs)
+	if err != nil {
+		return nil, nil, err
+	}
+	rdata, err := s.Marshal(result)
+	if err != nil {
+		return nil, nil, fmt.Errorf("error marshalling result: %s", err)
+	}
+	state := s.DetermineState(result)
+	if state != nil {
+		sdata, err := s.Marshal(state)
+		if err != nil {
+			return nil, nil, fmt.Errorf("error marshalling state: %s", err)
+		}
+		return rdata, sdata, err
+	}
+	return rdata, nil, err
+}
diff --git a/vendor/github.com/mandelsoft/filepath/LICENSE b/vendor/github.com/mandelsoft/filepath/LICENSE
new file mode 100644
index 0000000..d86fddb
--- /dev/null
+++ b/vendor/github.com/mandelsoft/filepath/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 by mandelsoft. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mandelsoft/filepath/cmd/ojoin/example/version b/vendor/github.com/mandelsoft/filepath/cmd/ojoin/example/version new file mode 120000 index 0000000..28b45cd --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/cmd/ojoin/example/version @@ -0,0 +1 @@ +versions/v1 \ No newline at end of file diff --git a/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/src/versions/current b/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/src/versions/current new file mode 120000 index 0000000..587c3e8 --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/src/versions/current @@ -0,0 +1 @@ +../../version \ No newline at end of file diff --git a/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/src/versions/v1/modules/test b/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/src/versions/v1/modules/test new file mode 120000 index 0000000..277a128 --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/src/versions/v1/modules/test @@ -0,0 +1 @@ +../../../pool/test \ No newline at end of file diff --git a/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/version b/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/version new file mode 120000 index 0000000..7fda260 --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/cmd/osyml/example/version @@ -0,0 +1 @@ +src/versions/v1 \ No newline at end of file diff --git a/vendor/github.com/mandelsoft/filepath/pkg/filepath/eval.go b/vendor/github.com/mandelsoft/filepath/pkg/filepath/eval.go new file mode 100644 index 0000000..6c77a6f --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/pkg/filepath/eval.go @@ -0,0 +1,130 @@ +package filepath + +import ( + "errors" + "os" + "strings" +) + +// SplitVolume splits a path into a volume and a path part. +func SplitVolume(path string) (string, string) { + vol := VolumeName(path) + return vol, path[len(vol):] +} + +// SplitPath splits a path into a volume, an array of the path segments and a rooted flag. +// The rooted flag is true, if the given path is an absolute one. In this case the +// segment array does not contain a root segment. +func SplitPath(path string) (string, []string, bool) { + vol, path := SplitVolume(path) + rest := path + elems := []string{} + for rest != "" { + i := 0 + for i < len(rest) && os.IsPathSeparator(rest[i]) { + i++ + } + j := i + for j < len(rest) && !os.IsPathSeparator(rest[j]) { + j++ + } + b := rest[i:j] + rest = rest[j:] + if b == "." 
|| b == "" { + continue + } + elems = append(elems, b) + } + return vol, elems, strings.HasPrefix(path, PathSeparatorString) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +func evalPath(path string, exist bool, link ...bool) (string, error) { + var parsed string + var dir bool + + vol, elems, rooted := SplitPath(path) + getlink := true + if len(link) > 0 { + getlink = link[0] + } +outer: + for { + parsed = "" + dir = true + + for i := 0; i < len(elems); i++ { + e := elems[i] + next := e + if len(parsed) > 0 { + next = parsed + PathSeparatorString + e + } + switch e { + case ".": + if !dir { + return "", errors.New("not a directory") + } + continue + case "..": + if !dir { + return "", errors.New("not a directory") + } + base := Base(parsed) + if parsed == "" || base == ".." { + if !rooted { + parsed = next + } + } else { + parsed = Dir(parsed) + if parsed == "." { + parsed = "" + } + } + continue + } + p := next + if rooted { + p = string(os.PathSeparator) + next + } + fi, err := os.Lstat(p) + if err != nil { + if os.IsPermission(err) { + return "", &os.PathError{"", p, err} + } + if exist || !os.IsNotExist(err) { + return "", &os.PathError{"", p, err} + } + dir = true + parsed = next + } else { + if fi.Mode()&os.ModeType != os.ModeSymlink || (!getlink && i == len(elems)-1) { + dir = fi.IsDir() + parsed = next + continue + } + link, err := os.Readlink(p) + if err != nil { + return "", &os.PathError{"", next, err} + } + v, nested, r := SplitPath(link) + if r { + elems = append(nested, elems[i+1:]...) + vol = v + rooted = r + continue outer + } + elems = append(elems[:i], append(nested, elems[i+1:]...)...) + i-- + } + } + break + } + if rooted { + return vol + PathSeparatorString + parsed, nil + } + if len(parsed) == 0 { + parsed = "." + } + return vol + parsed, nil +} diff --git a/vendor/github.com/mandelsoft/filepath/pkg/filepath/orig.go b/vendor/github.com/mandelsoft/filepath/pkg/filepath/orig.go new file mode 100644 index 0000000..734f120 --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/pkg/filepath/orig.go @@ -0,0 +1,71 @@ +package filepath + +import ( + orig "path/filepath" +) + +func ToSlash(path string) string { + return orig.ToSlash(path) +} + +func FromSlash(path string) string { + return orig.FromSlash(path) +} + +func SplitList(path string) []string { + return orig.SplitList(path) +} + +func Split(path string) (string, string) { + return orig.Split(path) +} + +func Clean(path string) string { + return orig.Clean(path) +} + +func Ext(path string) string { + return orig.Ext(path) +} + +func VolumeName(path string) string { + return orig.VolumeName(path) +} + +func Rel(basepath, targpath string) (string, error) { + base, err := Canonical(basepath, false) + if err != nil { + return "", err + } + targpath, err = EvalSymlinks(targpath) + if err != nil { + return "", err + } + return orig.Rel(base, targpath) +} + +// SkipDir is used as a return value from WalkFuncs to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var SkipDir = orig.SkipDir + +// WalkFunc is the type of the function called for each file or directory +// visited by Walk. The path argument contains the argument to Walk as a +// prefix; that is, if Walk is called with "dir", which is a directory +// containing the file "a", the walk function will be called with argument +// "dir/a". The info argument is the os.FileInfo for the named path. 
+// +// If there was a problem walking to the file or directory named by path, the +// incoming error will describe the problem and the function can decide how +// to handle that error (and Walk will not descend into that directory). In the +// case of an error, the info argument will be nil. If an error is returned, +// processing stops. The sole exception is when the function returns the special +// value SkipDir. If the function returns SkipDir when invoked on a directory, +// Walk skips the directory's contents entirely. If the function returns SkipDir +// when invoked on a non-directory file, Walk skips the remaining files in the +// containing directory. +type WalkFunc orig.WalkFunc + +func Walk(root string, walkFn WalkFunc) error { + return orig.Walk(root, orig.WalkFunc(walkFn)) +} diff --git a/vendor/github.com/mandelsoft/filepath/pkg/filepath/path.go b/vendor/github.com/mandelsoft/filepath/pkg/filepath/path.go new file mode 100644 index 0000000..9e43f02 --- /dev/null +++ b/vendor/github.com/mandelsoft/filepath/pkg/filepath/path.go @@ -0,0 +1,345 @@ +// Copyright 2017 by mandelsoft. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filepath implements utility routines for manipulating filename paths +// in a way compatible with the target operating system-defined file paths. +// It is a modification of the original GO package path/filepath solving +// severe errors in handling symbolic links. +// +// The original package defines a function Clean that formally normalizes +// a file path by eliminating .. and . entries. This is done WITHOUT +// observing the actual file system. Although this is no problem for +// the function itself, because it is designed to do so, it becomes a severe +// problem for the whole package, because nearly all functions internally use +// Clean to clean the path. As a consequence even functions like Join deliver +// corrupted invalid results for valid inputs if the path incorporates +// symbolic links to directories. Especially EvalSymlinks cannot be used +// to evaluate paths to existing files, because Clean is internally used to +// normalize content of symbolic links. +// +// This package provides a set of functions that do not hamper the meaning +// of path names keeping the rest of the original specification as far as +// possible. Additionally some new functions (like Canonical) or alternate +// versions of existing functions (like Split2) are provided +// that offer a more useful specification than the original one. +package filepath + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" +) + +const PathSeparatorString = string(os.PathSeparator) + +func debug(f string, args ...interface{}) { + if false { + fmt.Printf(f, args...) + } +} + +// IsAbs return true if the given path is an absolute one +// starting with a Separator or is quailified by a volume name. +func IsAbs(path string) bool { + return strings.HasPrefix(path, PathSeparatorString) || + strings.HasPrefix(path, VolumeName(path)+PathSeparatorString) +} + +// Canonical returns the canonical absolute path of a file. +// If exist=false the denoted file must not exist, but +// then the part of the initial path refering to a not existing +// directoy structure is lexically resolved (like Clean) and +// does not consider potential symbolic links that might occur +// if the file is finally created in the future. 
+func Canonical(path string, exist bool) (string, error) { + return walk(path, -1, exist) +} + +// EvalSymLinks resolves all symbolic links in a path +// and returns a path not containing any symbolic link +// anymore. It does not call Clean on a non-canonical path, +// so the result always denotes the same file than the original path. +// If the given path is a relative one, a +// reLative one is returned as long as there is no +// absolute symbolic link and the relative path does +// not goes up the current working diretory. +// If a relative path is returned, symbolic links +// up the current working directory are not resolved. +func EvalSymlinks(path string) (string, error) { + return walk(path, 0, false) +} + +// Abs returns an absolute representation of path. +// If the path is not absolute it will be joined with the current +// working directory to turn it into an absolute path. The absolute +// path name for a given file is not guaranteed to be unique. +// Symbolic links in the given path will be resolved, but not in +// the current working directory, if used to make the path absolute. +// The denoted file may not exist. +// Abs never calls Clean on the result, so the resulting path +// will denote the same file as the argument. +func Abs(path string) (string, error) { + path, err := walk(path, 0, false) + if err != nil { + return "", err + } + if IsAbs(path) { + return path, nil + } + p, err := os.Getwd() + if err != nil { + return "", err + } + return Join(p, path), nil +} + +func walk(p string, parent int, exist bool) (string, error) { + var rest []string = []string{} + + links := 0 + + for !IsRoot(p) && p != "" { + n, b := Split2(p) + if b == "" { + fmt.Printf("debug: ignoring empty base -> %s \n", n) + p = n + continue + } + fi, err := os.Lstat(p) + debug("debug: %s // %s %v\n", n, b, err) + if exists_(err) { + if err != nil && !os.IsPermission(err) { + return "", err + } + debug("debug: file exists '%s'\n", p) + if fi.Mode()&os.ModeSymlink != 0 { + newpath, err := os.Readlink(p) + if err != nil { + return "", err + } + if IsAbs(newpath) { + p = newpath + } else { + p = Join(n, newpath) + } + debug("LINK %s -> %s\n", newpath, p) + links++ + if links > 255 { + return "", errors.New("AbsPath: too many links") + } + continue + } + } else { + if exist { + return "", err + } + debug("debug: %s does not exist\n", p) + } + if b != "." { + rest = append([]string{b}, rest...) + if parent >= 0 && b == ".." { + parent++ + } else { + if parent > 0 { + parent-- + } + } + } + if parent != 0 && n == "" { + p, err = os.Getwd() + if err != nil { + return "", err + } + debug("debug: continue with wd '%s'\n", p) + } else { + p = n + } + } + if p == "" { + return Clean(Join(rest...)), nil + } + return Clean(Join(append([]string{p}, rest...)...)), nil +} + +// Exists checks whether a file exists. +func Exists(path string) bool { + _, err := os.Stat(path) + return exists_(err) +} + +func exists_(err error) bool { + return err == nil || !os.IsNotExist(err) +} + +// Dir2 returns the path's directory dropping the final element +// after removing trailing Separators, Dir2 goes not call Clean on the path. +// If the path is empty, Dir2 returns ".". +// If the path consists entirely of Separators, Dir2 returns a single Separator. +// The returned path does not end in a Separator unless it is the root directory. +// This function is the counterpart of Base +// Base("a/b/")="b" and Dir("a/b/") = "a". 
+// In general Trim(Join(Dir2(path),Base(path))) should be Trim(path) +func Dir2(path string) string { + def := "." + vol := VolumeName(path) + i := len(path) - 1 + for i > len(vol) && os.IsPathSeparator(path[i]) { + i-- + } + for i >= len(vol) && !os.IsPathSeparator(path[i]) { + i-- + } + for i > len(vol) && os.IsPathSeparator(path[i]) { + def = string(os.PathSeparator) + i-- + } + path = path[len(vol) : i+1] + if path == "" { + return def + } + return vol + path +} + +// Dir acts like filepath.Dir, but does not +// clean the path +// Like the original Dir function this is NOT +// the counterpart of Base if the path ends with +// a trailing Separator. Base("a/b/")="b" and +// Dir("a/b/") = "a/b". +func Dir(path string) string { + def := "." + vol := VolumeName(path) + i := len(path) - 1 + for i >= len(vol) && !os.IsPathSeparator(path[i]) { + i-- + } + for i > len(vol) && os.IsPathSeparator(path[i]) { + def = string(os.PathSeparator) + i-- + } + + path = path[len(vol) : i+1] + if path == "" { + path = def + } + return vol + path +} + +func Base(path string) string { + vol := VolumeName(path) + i := len(path) - 1 + for i > len(vol) && os.IsPathSeparator(path[i]) { + i-- + } + j := i + for j >= len(vol) && !os.IsPathSeparator(path[j]) { + j-- + } + path = path[j+1 : i+1] + if path == "" { + if j == len(vol) { + return PathSeparatorString + } + return "." + } + return path +} + +// Trim eleminates additional slashes and dot segments from a path name. +// An empty path is unchanged. +// +func Trim(path string) string { + vol := VolumeName(path) + i := len(path) - 1 + for i > len(vol) && os.IsPathSeparator(path[i]) { + i-- + } + + path = path[:i+1] + + k := len(path) + i = k - 1 + for i >= len(vol) { + j := i + for j >= len(vol) && os.IsPathSeparator(path[j]) { + j-- + } + if i != j { + if path[i+1:k] == "." { + if j < len(vol) && k == len(path) { + j++ // keep starting separator instead of trailing one, because this does not exist + } + i = k + } + path = path[:j+1] + path[i:] + i = j + k = i + 1 + } + i-- + } + if k < len(path) && path[len(vol):k] == "." { + path = path[:len(vol)] + path[k+1:] + } + + return path + +} + +// Split2 splits path immediately following the final Separator, +// separating it into a directory and file name component. +// If there is no Separator in path, Split returns an empty dir +// and file set to path. In contrast to Split the directory +// path does not end with a trailing Separator, so Split2 can +// subsequently called for the directory part, again. +func Split2(path string) (dir, file string) { + vol := VolumeName(path) + i := len(path) - 1 + for i >= len(vol) && !os.IsPathSeparator(path[i]) { + i-- + } + j := i + for j > len(vol) && os.IsPathSeparator(path[j]) { + j-- + } + return path[:j+1], path[i+1:] +} + +// Join joins any number of path elements into a single path, adding +// a Separator if necessary. Join never calls Clean on the result to +// assure the result denotes the same file as the input. +// On Windows, the result is a UNC path if and only if the first path +// element is a UNC path. +func Join(elems ...string) string { + s := string(os.PathSeparator) + string(os.PathSeparator) + for i := 0; i < len(elems); i++ { + if elems[i] == "" { + elems = append(elems[:i], elems[i+1:]...) + } + } + r := strings.Join(elems, string(os.PathSeparator)) + for strings.Index(r, s) >= 0 { + r = strings.ReplaceAll(r, s, string(os.PathSeparator)) + } + return r +} + +// IsRoot determines whether a given path is a root path. 
+// This might be the Separator or the Separator preceded by +// a volume name under Windows. +// This function is directly taken from the original filepath +// package. +func IsRoot(path string) bool { + if runtime.GOOS != "windows" { + return path == "/" + } + switch len(path) { + case 1: + return os.IsPathSeparator(path[0]) + case 3: + return path[1] == ':' && os.IsPathSeparator(path[2]) + } + return false +} diff --git a/vendor/github.com/mandelsoft/vfs/LICENSE b/vendor/github.com/mandelsoft/vfs/LICENSE new file mode 100644 index 0000000..5c304d1 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/mandelsoft/vfs/pkg/osfs/doc.go b/vendor/github.com/mandelsoft/vfs/pkg/osfs/doc.go new file mode 100644 index 0000000..d2666d6 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/osfs/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package osfs maps the operating system filesystem to a virtual filesystem. +package osfs diff --git a/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs.go b/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs.go new file mode 100644 index 0000000..0852222 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs.go @@ -0,0 +1,155 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package osfs + +import ( + "os" + "time" + + "github.com/mandelsoft/filepath/pkg/filepath" + + "github.com/mandelsoft/vfs/pkg/utils" + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type osFileSystem struct { +} + +func New() vfs.FileSystem { + return &osFileSystem{} +} + +func (osFileSystem) Name() string { + return "OsFs" +} + +func (osFileSystem) VolumeName(name string) string { + return filepath.VolumeName(name) +} + +func (osFileSystem) FSTempDir() string { + return os.TempDir() +} + +func (osFileSystem) Normalize(path string) string { + return mapPath(path) +} + +func (osFileSystem) Getwd() (string, error) { + d, err := os.Getwd() + if err != nil { + return "", err + } + return mapPath(d), nil +} + +func (osFileSystem) Create(name string) (vfs.File, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + f, e := os.Create(name) + if f == nil { + return nil, e + } + abs, err = filepath.EvalSymlinks(abs) + if err != nil { + f.Close() + return nil, err + } + return utils.NewRenamedFile(abs, f), e +} + +func (osFileSystem) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (osFileSystem) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (osFileSystem) Open(name string) (vfs.File, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + f, e := os.Open(name) + if f == nil { + return nil, e + } + abs, err = filepath.EvalSymlinks(abs) + if err != nil { + f.Close() + return nil, err + } + return utils.NewRenamedFile(abs, f), e +} + +func (osFileSystem) OpenFile(name string, flag int, perm os.FileMode) (vfs.File, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + f, e := os.OpenFile(name, flag, perm) + if f == nil { + return nil, e + } + abs, err = filepath.EvalSymlinks(abs) + if err != nil { + f.Close() + return nil, err + } + return utils.NewRenamedFile(abs, f), e +} + +func (osFileSystem) Remove(name string) error { + return os.Remove(name) +} + +func (osFileSystem) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (osFileSystem) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (osFileSystem) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (osFileSystem) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (osFileSystem) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (osFileSystem) Lstat(name string) (os.FileInfo, error) { + return os.Lstat(name) +} + +func (osFileSystem) Symlink(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (osFileSystem) Readlink(name string) (string, error) { + return os.Readlink(name) +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs_unix.go b/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs_unix.go new file mode 100644 index 0000000..908d1e8 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs_unix.go @@ -0,0 +1,23 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package osfs + +func mapPath(path string) string { + return path +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs_windows.go b/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs_windows.go new file mode 100644 index 0000000..68a4dab --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/osfs/osfs_windows.go @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package osfs + +import ( + "os" +) + +func mapPath(p string) string { + mapped := "" + for c := range path { + if os.IsPathSeparator(c) { + mapped = mapped + pkg.PathSeparatorChar + } else { + mapped = mapped + c + } + } + return mapped +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/osfs/tempfs.go b/vendor/github.com/mandelsoft/vfs/pkg/osfs/tempfs.go new file mode 100644 index 0000000..e647bee --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/osfs/tempfs.go @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package osfs + +import ( + "io/ioutil" + "os" + + "github.com/mandelsoft/vfs/pkg/projectionfs" + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type tempfs struct { + vfs.FileSystem + dir string +} + +func NewTempFileSystem() (vfs.FileSystem, error) { + dir, err := ioutil.TempDir("", "VFS-") + if err != nil { + return nil, err + } + fs, err := projectionfs.New(New(), dir) + if err != nil { + os.Remove(dir) + } + return &tempfs{fs, dir}, err +} + +func (t *tempfs) Cleanup() { + os.RemoveAll(t.dir) +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/projectionfs/doc.go b/vendor/github.com/mandelsoft/vfs/pkg/projectionfs/doc.go new file mode 100644 index 0000000..92d938f --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/projectionfs/doc.go @@ -0,0 +1,21 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package projectionfs implements virtual filesystems based on a +// dedicated directory of a base filesystem. +package projectionfs diff --git a/vendor/github.com/mandelsoft/vfs/pkg/projectionfs/projectionfs.go b/vendor/github.com/mandelsoft/vfs/pkg/projectionfs/projectionfs.go new file mode 100644 index 0000000..65ad072 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/projectionfs/projectionfs.go @@ -0,0 +1,53 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package projectionfs + +import ( + "fmt" + + "github.com/mandelsoft/vfs/pkg/utils" + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type ProjectionFileSystem struct { + *utils.MappedFileSystem + projection string +} + +type adapter struct { + fs *ProjectionFileSystem +} + +func (a *adapter) MapPath(name string) (vfs.FileSystem, string) { + return a.fs.Base(), vfs.Join(a.fs.Base(), a.fs.projection, name) +} + +func New(base vfs.FileSystem, path string) (vfs.FileSystem, error) { + eff, err := vfs.Canonical(base, path, true) + if err != nil { + return nil, err + } + fs := &ProjectionFileSystem{projection: eff} + fs.MappedFileSystem = utils.NewMappedFileSystem(base, &adapter{fs}) + return fs, nil +} + +func (p *ProjectionFileSystem) Name() string { + return fmt.Sprintf("ProjectionFilesytem [%s]%s", p.Base().Name(), p.projection) +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/base.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/base.go new file mode 100644 index 0000000..a06be64 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/base.go @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type FileSystemBase struct{} + +func (FileSystemBase) VolumeName(name string) string { + return "" +} + +func (FileSystemBase) FSTempDir() string { + return "/" +} + +func (FileSystemBase) Normalize(path string) string { + return path +} + +func (FileSystemBase) Getwd() (string, error) { + return vfs.PathSeparatorString, nil +} + +func (FileSystemBase) Cleanup() error { + return nil +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/eval.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/eval.go new file mode 100644 index 0000000..ff15ca5 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/eval.go @@ -0,0 +1,110 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type FileDataDirAccess interface { + Lock() + Unlock() + IsDir() bool + IsSymlink() bool + GetEntry(name string) (FileDataDirAccess, error) + GetSymlink() string +} + +func EvaluatePath(fs vfs.FileSystem, root FileDataDirAccess, name string, link ...bool) (FileDataDirAccess, string, FileDataDirAccess, string, error) { + var data []FileDataDirAccess + var path string + var dir bool + + _, elems, _ := vfs.SplitPath(fs, name) + getlink := true + if len(link) > 0 { + getlink = link[0] + } +outer: + for { + path = "/" + data = []FileDataDirAccess{root} + dir = true + + for i := 0; i < len(elems); i++ { + e := elems[i] + cur := len(data) - 1 + switch e { + case ".": + if !dir { + return nil, "", nil, "", vfs.ErrNotDir + } + continue + case "..": + if !dir { + return nil, "", nil, "", vfs.ErrNotDir + } + if len(data) > 1 { + data = data[:cur] + path, _ = vfs.Split(fs, path) + } + continue + } + data[cur].Lock() + next, err := data[cur].GetEntry(e) + data[cur].Unlock() + if vfs.IsErrNotDir(err) { + return nil, "", nil, "", vfs.NewPathError("", path, err) + } + if vfs.IsErrNotExist(err) { + if i == len(elems)-1 { + return data[cur], path, nil, e, nil + } + return nil, "", nil, "", vfs.NewPathError("", vfs.Join(fs, path, e), err) + } + next.Lock() + if !next.IsSymlink() || (!getlink && i == len(elems)-1) { + dir = next.IsDir() + path = vfs.Join(fs, path, e) + data = append(data, next) + next.Unlock() + continue + } + link := next.GetSymlink() + next.Unlock() + _, nested, rooted := vfs.SplitPath(fs, link) + if rooted { + elems = append(nested, elems[i+1:]...) + i = 0 + continue outer + } + elems = append(elems[:i], append(nested, elems[i+1:]...)...) + i-- + } + break + } + if path == vfs.PathSeparatorString { + return root, path, root, "", nil + } + d, b := vfs.Split(fs, path) + if d == "" { + return root, vfs.PathSeparatorString, data[len(data)-1], b, nil + } + return data[len(data)-2], d, data[len(data)-1], b, nil +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/file.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/file.go new file mode 100644 index 0000000..c11d502 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/file.go @@ -0,0 +1,255 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "bytes" + "errors" + "io" + "os" + "sort" + "time" + + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type FileData interface { + FileDataDirAccess + Data() []byte + Files() []os.FileInfo + SetData([]byte) + Mode() os.FileMode + SetMode(mode os.FileMode) + ModTime() time.Time + SetModTime(time.Time) + Add(name string, f FileData) error + Del(name string) error +} + +type File struct { + // atomic requires 64-bit alignment for struct field access + offset int64 + readDirCount int64 + closed bool + readOnly bool + fileData FileData + name string +} + +var _ vfs.File = &File{} + +func newFileHandle(name string, data FileData) *File { + return &File{name: name, fileData: data} +} + +func (f *File) Open() error { + f.fileData.Lock() + f.offset = 0 + f.readDirCount = 0 + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.name +} + +func (f *File) Stat() (os.FileInfo, error) { + return NewFileInfo(f.name, f.fileData), nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (files []os.FileInfo, err error) { + if !f.fileData.IsDir() { + return nil, &os.PathError{Op: "readdir", Path: f.name, Err: ErrNotDir} + } + var outLength int64 + + f.fileData.Lock() + defer f.fileData.Unlock() + + files = f.fileData.Files() + sort.Sort(FilesSorter(files)) + if f.readDirCount >= int64(len(files)) { + files = []os.FileInfo{} + } else { + files = files[f.readDirCount:] + } + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + + return files, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + names[i] = f.Name() + } + return names, err +} + +func (f *File) Read(buf []byte) (int, error) { + f.fileData.Lock() + defer f.fileData.Unlock() + n, err := f.read(buf, f.offset, io.ErrUnexpectedEOF) + f.offset += int64(n) + return n, err +} + +func (f *File) read(b []byte, offset int64, err error) (int, error) { + if f.closed == true { + return 0, ErrFileClosed + } + data := f.fileData.Data() + if len(b) > 0 && int(offset) == len(data) { + return 0, io.EOF + } + if int(f.offset) > len(data) { + return 0, err + } + n := len(b) + if len(data)-int(offset) < len(b) { + n = len(data) - int(offset) + } + copy(b, data[offset:offset+int64(n)]) + return n, nil +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + return f.read(b, off, io.EOF) +} + +func (f *File) Truncate(size int64) error { + if f.readOnly { + return ErrReadOnly + } + if f.closed == true { + return ErrFileClosed + } + if size < 0 { + return ErrOutOfRange + } + f.fileData.Lock() + defer f.fileData.Unlock() + data := f.fileData.Data() + if size > int64(len(data)) { + diff := size - int64(len(data)) + f.fileData.SetData(append(data, bytes.Repeat([]byte{00}, int(diff))...)) + } else { + f.fileData.SetData(data[0:size]) + } + f.fileData.SetModTime(time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + f.fileData.Lock() + defer f.fileData.Unlock() + data := f.fileData.Data() + switch whence { + case 0: 
+ case 1: + offset += f.offset + case 2: + offset = int64(len(data)) + offset + } + if offset < 0 || offset >= int64(len(data)) { + return 0, ErrOutOfRange + } + f.offset = offset + return f.offset, nil +} + +func (f *File) Write(buf []byte) (int, error) { + f.fileData.Lock() + defer f.fileData.Unlock() + n, err := f.write(buf, f.offset) + if err != nil { + return 0, err + } + f.offset += int64(n) + return int(n), nil +} + +func (f *File) write(buf []byte, offset int64) (int, error) { + if f.readOnly { + return 0, ErrReadOnly + } + if f.closed == true { + return 0, ErrFileClosed + } + data := f.fileData.Data() + n := int64(len(buf)) + add := offset + n - int64(len(data)) + copy(data[offset:], buf) + if add > 0 { + f.fileData.SetData(append(data, buf[n-add:]...)) + } + f.fileData.SetModTime(time.Now()) + return int(n), nil +} + +func (f *File) WriteAt(buf []byte, off int64) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + return f.write(buf, off) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +var ( + ErrNotDir = vfs.ErrNotDir + ErrReadOnly = vfs.ErrReadOnly + + ErrFileClosed = errors.New("file is closed") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") + ErrNotEmpty = vfs.ErrNotEmpty +) diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/fileInfo.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/fileInfo.go new file mode 100644 index 0000000..2a51fe7 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/fileInfo.go @@ -0,0 +1,74 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// fileInfo implementing os.FileInfo +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type fileInfo struct { + fileData FileData + name string +} + +var _ os.FileInfo = &fileInfo{} + +func NewFileInfo(name string, file FileData) os.FileInfo { + return &fileInfo{name: name, fileData: file} +} + +var _ os.FileInfo = &fileInfo{} + +func (f *fileInfo) Name() string { + return f.name +} + +func (f *fileInfo) Mode() os.FileMode { + f.fileData.Lock() + defer f.fileData.Unlock() + return f.fileData.Mode() +} + +func (f *fileInfo) ModTime() time.Time { + f.fileData.Lock() + defer f.fileData.Unlock() + return f.fileData.ModTime() +} + +func (f *fileInfo) IsDir() bool { + f.fileData.Lock() + defer f.fileData.Unlock() + return f.fileData.IsDir() +} + +func (f *fileInfo) Sys() interface{} { return nil } + +func (f *fileInfo) Size() int64 { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.fileData.IsDir() { + return int64(42) + } + return int64(len(f.fileData.Data())) +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/fssupport.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/fssupport.go new file mode 100644 index 0000000..ecccaca --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/fssupport.go @@ -0,0 +1,315 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "errors" + "os" + "strings" + "time" + + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type SupportAdapter interface { + CreateFile(perm os.FileMode) FileData + CreateDir(perm os.FileMode) FileData + CreateSymlink(oldname string, perm os.FileMode) FileData +} + +type FileSystemSupport struct { + FileSystemBase + name string + root FileData + adapter SupportAdapter +} + +func NewFSSupport(name string, root FileData, adapter SupportAdapter) vfs.FileSystem { + return &FileSystemSupport{name: name, root: root, adapter: adapter} +} + +func (m *FileSystemSupport) Name() string { + return m.name +} + +func (m *FileSystemSupport) findFile(name string, link ...bool) (FileData, string, error) { + _, _, f, n, err := m.createInfo(name, link...) + if err != nil { + return nil, n, err + } + if f == nil { + err = os.ErrNotExist + } + return f, n, err +} + +func asFileData(a FileDataDirAccess) FileData { + if a == nil { + return nil + } + return a.(FileData) +} + +func (m *FileSystemSupport) createInfo(name string, link ...bool) (FileData, string, FileData, string, error) { + d, dn, f, fn, err := EvaluatePath(m, m.root, name, link...) 
+ return asFileData(d), dn, asFileData(f), fn, err +} + +func (m *FileSystemSupport) Create(name string) (vfs.File, error) { + parent, _, f, n, err := m.createInfo(name) + if err != nil { + return nil, err + } + if f != nil { + return nil, os.ErrExist + } + + f = m.adapter.CreateFile(os.ModePerm) + parent.Lock() + defer parent.Unlock() + err = parent.Add(n, f) + if err != nil { + return nil, err + } + return newFileHandle(n, f), nil +} + +func (m *FileSystemSupport) Mkdir(name string, perm os.FileMode) error { + parent, _, f, n, err := m.createInfo(name) + if err != nil { + return err + } + if f != nil { + return os.ErrExist + } + parent.Lock() + defer parent.Unlock() + return parent.Add(n, m.adapter.CreateDir(perm)) +} + +func (m *FileSystemSupport) MkdirAll(path string, perm os.FileMode) error { + path, err := vfs.Canonical(m, path, false) + if err != nil { + return err + } + _, elems, _ := vfs.SplitPath(m, path) + parent := m.root + for i, e := range elems { + parent.Lock() + next, err := parent.GetEntry(e) + if err != nil && err != os.ErrNotExist { + parent.Unlock() + return &os.PathError{Op: "mkdirall", Path: strings.Join(elems[:i+1], vfs.PathSeparatorString), Err: err} + } + if next == nil { + next = m.adapter.CreateDir(perm) + parent.Add(e, next.(FileData)) + } + parent.Unlock() + parent = next.(FileData) + } + return nil +} + +func (m *FileSystemSupport) Open(name string) (vfs.File, error) { + f, _, err := m.findFile(name) + if err != nil { + return nil, err + } + return newFileHandle(name, f), nil +} + +func (m *FileSystemSupport) OpenFile(name string, flags int, perm os.FileMode) (vfs.File, error) { + dir, _, f, n, err := m.createInfo(name) + if err != nil { + return nil, err + } + if f == nil { + if flags&(os.O_CREATE) == 0 { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + f = m.adapter.CreateFile(perm) + dir.Lock() + err = dir.Add(n, f) + a, _ := dir.GetEntry(n) + if err != nil { + if !vfs.IsErrExist(err) { + dir.Unlock() + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + if flags&os.O_EXCL != 0 { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrExist} + } + f = a.(FileData) + } + dir.Unlock() + } else { + if flags&os.O_EXCL != 0 { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrExist} + } + } + h := newFileHandle(name, f) + + if flags&(os.O_RDONLY|os.O_WRONLY|os.O_RDWR) == os.O_RDONLY { + h.readOnly = true + } else { + if flags&os.O_APPEND != 0 { + _, err = h.Seek(0, os.SEEK_END) + } + if err == nil && flags&os.O_TRUNC > 0 && flags&(os.O_RDWR|os.O_WRONLY) > 0 { + err = h.Truncate(0) + } + if err != nil { + h.Close() + return nil, err + } + } + return h, nil +} + +func (m *FileSystemSupport) Remove(name string) error { + dir, _, f, n, err := m.createInfo(name, false) + if err != nil { + return err + } + + if f == nil { + return os.ErrNotExist + } + f.Lock() + defer f.Unlock() + if f.IsDir() { + if len(f.Files()) > 0 { + return &os.PathError{Op: "remove", Path: name, Err: ErrNotEmpty} + } + } + if n == "" { + return errors.New("cannot delete root dir") + } + dir.Lock() + defer dir.Unlock() + return dir.Del(n) +} + +func (m *FileSystemSupport) RemoveAll(name string) error { + dir, _, _, n, err := m.createInfo(name, false) + if err != nil { + return err + } + if n == "" { + return errors.New("cannot delete root dir") + } + dir.Lock() + defer dir.Unlock() + return dir.Del(n) +} + +func (m *FileSystemSupport) Rename(oldname, newname string) error { + odir, _, fo, o, err := m.createInfo(oldname, false) + if 
err != nil { + return err + } + if o == "" { + return errors.New("cannot rename root dir") + } + ndir, _, fn, n, err := m.createInfo(newname) + if err != nil { + return err + } + if fo == nil { + return os.ErrNotExist + } + if fn != nil { + return os.ErrExist + } + + ndir.Lock() + err = ndir.Add(n, fo) + ndir.Unlock() + if err == nil { + odir.Lock() + odir.Del(o) + odir.Unlock() + } + return err +} + +func (m *FileSystemSupport) Lstat(name string) (os.FileInfo, error) { + f, n, err := m.findFile(name, false) + if err != nil { + return nil, err + } + return NewFileInfo(n, f), nil +} + +func (m *FileSystemSupport) Stat(name string) (os.FileInfo, error) { + f, n, err := m.findFile(name) + if err != nil { + return nil, err + } + if f == nil { + return nil, os.ErrNotExist + } + return NewFileInfo(n, f), nil +} + +func (m *FileSystemSupport) Chmod(name string, mode os.FileMode) error { + f, _, err := m.findFile(name) + if err != nil { + return err + } + f.Lock() + defer f.Unlock() + f.SetMode((f.Mode() & (^os.ModePerm)) | (mode & os.ModePerm)) + return nil +} + +func (m *FileSystemSupport) Chtimes(name string, atime time.Time, mtime time.Time) error { + f, _, err := m.findFile(name) + if err != nil { + return err + } + f.Lock() + defer f.Unlock() + f.SetModTime(mtime) + return nil +} + +func (m *FileSystemSupport) Symlink(oldname, newname string) error { + parent, _, _, n, err := m.createInfo(newname) + if err != nil { + return err + } + parent.Lock() + defer parent.Unlock() + return parent.Add(n, m.adapter.CreateSymlink(oldname, os.ModePerm)) +} + +func (m *FileSystemSupport) Readlink(name string) (string, error) { + f, _, err := m.findFile(name, false) + if err != nil { + return "", err + } + f.Lock() + defer f.Unlock() + if f.IsSymlink() { + return f.GetSymlink(), nil + } + return "", &os.PathError{Op: "readlink", Path: name, Err: errors.New("no symlink")} +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/mappedfs.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/mappedfs.go new file mode 100644 index 0000000..6e49024 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/mappedfs.go @@ -0,0 +1,294 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type PathMapper interface { + MapPath(path string) (vfs.FileSystem, string) +} + +type MappedFileSystem struct { + FileSystemBase + mapper PathMapper + base vfs.FileSystem +} + +func NewMappedFileSystem(root vfs.FileSystem, mapper PathMapper) *MappedFileSystem { + return &MappedFileSystem{mapper: mapper, base: root} +} + +func (m *MappedFileSystem) Base() vfs.FileSystem { + return m.base +} + +func (m *MappedFileSystem) VolumeName(name string) string { + return m.base.VolumeName(name) +} + +func (m *MappedFileSystem) FSTempDir() string { + return vfs.PathSeparatorString +} + +func (m *MappedFileSystem) Normalize(path string) string { + return m.base.Normalize(path) +} + +func (*MappedFileSystem) Getwd() (string, error) { + return vfs.PathSeparatorString, nil +} + +// isAbs reports whether the path is absolute. +func isAbs(path string) bool { + return strings.HasPrefix(path, vfs.PathSeparatorString) +} + +func (m *MappedFileSystem) mapPath(path string, link ...bool) (vfs.FileSystem, string, string, error) { + getlink := true + if len(link) > 0 { + getlink = link[0] + } + + r := vfs.PathSeparatorString + fs, l := m.mapper.MapPath(r) + links := 0 + path = fs.Normalize(path) + + for path != "" { + i := 0 + for i < len(path) && vfs.IsPathSeparator(path[i]) { + i++ + } + j := i + for j < len(path) && !vfs.IsPathSeparator(path[j]) { + j++ + } + + b := path[i:j] + path = path[j:] + + switch b { + case ".", "": + continue + case "..": + r, b = vfs.Split(m.base, r) + if r == "" { + r = "/" + } + fs, l = m.mapper.MapPath(r) + continue + } + fs, l = m.mapper.MapPath(vfs.Join(m.base, r, b)) + + fi, err := fs.Lstat(l) + if vfs.Exists_(err) { + if err != nil && !os.IsPermission(err) { + return nil, "", "", err + } + if fi.Mode()&os.ModeSymlink != 0 && (getlink || strings.Contains(path, vfs.PathSeparatorString)) { + links++ + if links > 255 { + return nil, "", "", errors.New("AbsPath: too many links") + } + newpath, err := fs.Readlink(l) + if err != nil { + return nil, "", "", err + } + newpath = fs.Normalize(newpath) + vol, newpath := vfs.SplitVolume(m.base, newpath) + if vol != "" { + return nil, "", "", fmt.Errorf("volume links not possible: %s: %s", l, vol+newpath) + } + if isAbs(newpath) { + r = "/" + } + path = vfs.Join(m.base, newpath, path) + } else { + r = vfs.Join(m.base, r, b) + } + } else { + if strings.Contains(path, vfs.PathSeparatorString) { + return nil, "", "", err + } + r = vfs.Join(m.base, r, b) + } + } + return fs, l, r, nil +} + +func (m *MappedFileSystem) Chtimes(name string, atime, mtime time.Time) (err error) { + fs, l, _, err := m.mapPath(name) + if err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return fs.Chtimes(l, atime, mtime) +} + +func (m *MappedFileSystem) Chmod(name string, mode os.FileMode) (err error) { + fs, l, _, err := m.mapPath(name) + if err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return fs.Chmod(l, mode) +} + +func (m *MappedFileSystem) Stat(name string) (fi os.FileInfo, err error) { + fs, l, _, err := m.mapPath(name) + if err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return fs.Stat(l) +} + +func (m *MappedFileSystem) Rename(oldname, newname string) (err error) { + oldfs, o, _, err := m.mapPath(oldname, false) + if err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + newfs, n, _, err := m.mapPath(newname) + if err 
!= nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + if oldfs == newfs { + return oldfs.Rename(o, n) + } + return fmt.Errorf("no cross filesystem rename operation possible: %s -> %s", oldname, newname) +} + +func (m *MappedFileSystem) RemoveAll(name string) (err error) { + fs, l, _, err := m.mapPath(name, false) + if err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return fs.RemoveAll(l) +} + +func (m *MappedFileSystem) Remove(name string) (err error) { + fs, l, _, err := m.mapPath(name, false) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return fs.Remove(l) +} + +func (m *MappedFileSystem) OpenFile(name string, flag int, mode os.FileMode) (f vfs.File, err error) { + fs, l, n, err := m.mapPath(name) + if err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := fs.OpenFile(l, flag, mode) + if err != nil { + return nil, err + } + return NewRenamedFile(n, sourcef), nil +} + +func (m *MappedFileSystem) Open(name string) (f vfs.File, err error) { + fs, l, n, err := m.mapPath(name) + if err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := fs.Open(l) + if err != nil { + return nil, err + } + return NewRenamedFile(n, sourcef), nil +} + +func (m *MappedFileSystem) Mkdir(name string, mode os.FileMode) (err error) { + fs, l, _, err := m.mapPath(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return fs.Mkdir(l, mode) +} + +func (m *MappedFileSystem) MkdirAll(name string, mode os.FileMode) (err error) { + fs, l, _, err := m.mapPath(name) + if err == nil && fs == m.base { + return fs.MkdirAll(l, mode) + } + + _, elems, _ := vfs.SplitPath(m.base, name) + + r := "" + for _, dir := range elems { + r = vfs.PathSeparatorString + dir + fs, l, r, err := m.mapPath(r) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + fi, err := fs.Stat(l) + if err == nil { + if fi.IsDir() { + continue + } + return &os.PathError{Op: "mkdir", Path: name, Err: fmt.Errorf("%s is no dir", r)} + } + err = fs.Mkdir(l, mode) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + } + return nil +} + +func (m *MappedFileSystem) Create(name string) (f vfs.File, err error) { + fs, l, n, err := m.mapPath(name) + if err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := fs.Create(l) + if err != nil { + return nil, err + } + return NewRenamedFile(n, sourcef), nil +} + +func (m *MappedFileSystem) Lstat(name string) (os.FileInfo, error) { + fs, l, _, err := m.mapPath(name, false) + if err != nil { + return nil, &os.PathError{Op: "lstat", Path: name, Err: err} + } + return fs.Lstat(l) +} + +func (m *MappedFileSystem) Symlink(oldname, newname string) error { + fs, l, _, err := m.mapPath(newname) + if err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return fs.Symlink(oldname, l) +} + +func (m *MappedFileSystem) Readlink(name string) (string, error) { + fs, l, _, err := m.mapPath(name, false) + if err != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + return fs.Readlink(l) +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/sorter.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/sorter.go new file mode 100644 index 0000000..4eba261 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/sorter.go @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Mandelsoft. 
All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "os" +) + +type FilesSorter []os.FileInfo + +func (s FilesSorter) Len() int { return len(s) } +func (s FilesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s FilesSorter) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/vendor/github.com/mandelsoft/vfs/pkg/utils/utils.go b/vendor/github.com/mandelsoft/vfs/pkg/utils/utils.go new file mode 100644 index 0000000..dbb30c1 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/utils/utils.go @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "github.com/mandelsoft/vfs/pkg/vfs" +) + +type RenamedFile struct { + vfs.File + name string +} + +func NewRenamedFile(name string, file vfs.File) vfs.File { + return &RenamedFile{file, name} +} + +func (r *RenamedFile) Name() string { + return r.name +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors.go new file mode 100644 index 0000000..f1afd84 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors.go @@ -0,0 +1,82 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package vfs + +import ( + "errors" + "os" + "reflect" +) + +type ErrorMatcher func(err error) bool + +func MatchErr(err error, match ErrorMatcher, base error) bool { + for err != nil { + if base == err || (match != nil && match(err)) { + return true + } + switch nerr := err.(type) { + case interface{ Unwrap() error }: + err = nerr.Unwrap() + default: + err = nil + v := reflect.ValueOf(nerr) + if v.Kind() == reflect.Struct { + f := v.FieldByName("Err") + if f.IsValid() { + err, _ = f.Interface().(error) + } + } + } + } + return false +} + +func IsErrNotDir(err error) bool { + return MatchErr(err, isUnderlyingErrNotDir, ErrNotDir) +} + +func IsErrNotExist(err error) bool { + if os.IsNotExist(err) { + return true + } + return MatchErr(err, os.IsNotExist, ErrNotExist) +} + +func IsErrExist(err error) bool { + if os.IsExist(err) { + return true + } + return MatchErr(err, os.IsExist, ErrExist) +} + +func IsErrReadOnly(err error) bool { + return MatchErr(err, nil, ErrReadOnly) +} + +func NewPathError(op string, path string, err error) error { + return &os.PathError{Op: op, Path: path, Err: err} +} + +var ErrNotDir = errors.New("is no directory") +var ErrNotExist = os.ErrNotExist +var ErrExist = os.ErrExist + +var ErrReadOnly = errors.New("filehandle is not writable") +var ErrNotEmpty = errors.New("dir not empty") diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors_unix.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors_unix.go new file mode 100644 index 0000000..6d37e06 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors_unix.go @@ -0,0 +1,27 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package vfs + +import ( + "syscall" +) + +func isUnderlyingErrNotDir(err error) bool { + return err == syscall.ENOTDIR +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors_windows.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors_windows.go new file mode 100644 index 0000000..19151cb --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/errors_windows.go @@ -0,0 +1,25 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package vfs + +// TODO + +func isUnderlyingErrNotDir(err error) bool { + return false +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/eval.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/eval.go new file mode 100644 index 0000000..6341c34 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/eval.go @@ -0,0 +1,114 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package vfs + +import ( + "os" +) + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +func evalPath(fs FileSystem, path string, exist bool, link ...bool) (string, error) { + var parsed string + var dir bool + + vol, elems, rooted := SplitPath(fs, path) + getlink := true + if len(link) > 0 { + getlink = link[0] + } +outer: + for { + parsed = "" + dir = true + + for i := 0; i < len(elems); i++ { + e := elems[i] + next := e + if len(parsed) > 0 { + next = parsed + PathSeparatorString + e + } + switch e { + case ".": + if !dir { + return "", ErrNotDir + } + continue + case "..": + if !dir { + return "", ErrNotDir + } + base := Base(nil, parsed) + if parsed == "" || base == ".." { + if !rooted { + parsed = next + } + } else { + parsed = Dir(nil, parsed) + if parsed == "." { + parsed = "" + } + } + continue + } + p := next + if rooted { + p = PathSeparatorString + next + } + fi, err := fs.Lstat(p) + if err != nil { + if os.IsPermission(err) { + return "", err + } + if exist || !IsErrNotExist(err) { + return "", NewPathError("", p, err) + } + dir = true + parsed = next + } else { + if fi.Mode()&os.ModeType != os.ModeSymlink || (!getlink && i == len(elems)-1) { + dir = fi.IsDir() + parsed = next + continue + } + link, err := fs.Readlink(p) + if err != nil { + return "", NewPathError("", next, err) + } + v, nested, r := SplitPath(fs, link) + if r { + elems = append(nested, elems[i+1:]...) + vol = v + rooted = r + continue outer + } + elems = append(elems[:i], append(nested, elems[i+1:]...)...) + i-- + } + } + break + } + if rooted { + return vol + PathSeparatorString + parsed, nil + } + if len(parsed) == 0 { + parsed = "." + } + return vol + parsed, nil +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/interface.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/interface.go new file mode 100644 index 0000000..65cad7e --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/interface.go @@ -0,0 +1,174 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package vfs + +import ( + "io" + "os" + "time" +) + +const PathSeparatorChar = '/' +const PathSeparatorString = "/" + +type FileSystem interface { + + // VolumeName returns leading volume name. + // Given "C:\foo\bar" it returns "C:" on Windows. + // Given "\\host\share\foo" it returns "\\host\share". + // On other platforms it returns "". + VolumeName(name string) string + + // FSTempDir (similar to os.TempDir) provides + // the dir to use fortemporary files for this filesystem + FSTempDir() string + + // Normalize returns a path in the normalized vfs path syntax + Normalize(name string) string + + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flags int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // Lstat returns a FileInfo describing the named file, or an error, if any + // happens. + // If the file is a symbolic link, the returned FileInfo + // describes the symbolic link. Lstat makes no attempt to follow the link. + Lstat(name string) (os.FileInfo, error) + + // Create a symlink if supported + Symlink(oldname, newname string) error + + // Read a symlink if supported + Readlink(name string) (string, error) + + // Name returns the spec of this FileSystem + Name() string + + // Chmod changes the mode of the named file to mode. 
+ Chmod(name string, mode os.FileMode) error + + // Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error + + // Getwd return the absolute path of the working directory of the + // file system + Getwd() (string, error) +} + +type FileSystemWithWorkingDirectory interface { + FileSystem + Chdir(path string) error +} + +type FileSystemCleanup interface { + FileSystem + + // Cleanup should remove all temporary resources allocated + // for this file system + Cleanup() error +} + +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type VFS interface { + FileSystem + + Join(elems ...string) string + Split(path string) (string, string) + Base(path string) string + Dir(path string) string + Clean(path string) string + Trim(path string) string + IsAbs(path string) bool + IsRoot(path string) bool + SplitVolume(path string) (string, string) + SplitPath(path string) (vol string, elems []string, rooted bool) + + Canonical(path string, exist bool) (string, error) + Abs(path string) (string, error) + EvalSymlinks(path string) (string, error) + Walk(path string, fn WalkFunc) error + + Exists(path string) (bool, error) + DirExists(path string) (bool, error) + IsDir(path string) (bool, error) + IsFile(path string) (bool, error) + + ReadDir(path string) ([]os.FileInfo, error) + ReadFile(path string) ([]byte, error) + WriteFile(path string, data []byte, mode os.FileMode) error + TempFile(dir, prefix string) (File, error) + TempDir(dir, prefix string) (string, error) +} + +func Cleanup(fs FileSystem) error { + if fs != nil { + if c, ok := fs.(FileSystemCleanup); ok { + return c.Cleanup() + } + } + return nil +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/tempfile.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/tempfile.go new file mode 100644 index 0000000..92d9633 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/tempfile.go @@ -0,0 +1,117 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// modified for usage of virtual filesystem + +package vfs + +import ( + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/mandelsoft/filepath/pkg/filepath" +) + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextRandom() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir, +// opens the file for reading and writing, and returns the resulting *os.File. +// The filename is generated by taking pattern and adding a random +// string to the end. If pattern includes a "*", the random string +// replaces the last "*". 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFile(fs FileSystem, dir, pattern string) (f File, err error) { + if dir == "" { + dir = fs.FSTempDir() + } + + var prefix, suffix string + if pos := strings.LastIndex(pattern, "*"); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextRandom()+suffix) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if IsErrExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func TempDir(fs FileSystem, dir, prefix string) (name string, err error) { + if dir == "" { + dir = fs.FSTempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := Join(fs, dir, prefix+nextRandom()) + err = fs.Mkdir(try, 0700) + if IsErrExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if IsErrNotExist(err) { + if _, err := fs.Stat(dir); IsErrNotExist(err) { + return "", err + } + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/utils.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/utils.go new file mode 100644 index 0000000..7513647 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/utils.go @@ -0,0 +1,513 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package vfs + +import ( + "bytes" + "errors" + "io" + "os" + "path" + "sort" + "strings" +) + +func filesys(fs ...FileSystem) FileSystem { + if len(fs) == 0 { + return nil + } + return fs[0] +} + +// IsPathSeparator reports whether c is a directory separator character. +func IsPathSeparator(c uint8) bool { + return PathSeparatorChar == c +} + +// Join joins any number of path elements into a single path, adding +// a Separator if necessary. Join never calls Clean on the result to +// assure the result denotes the same file as the input. +// Empty entries will be ignored. +// If a FileSystem is given, the file systems volume. 
+// handling is applied, otherwise the path argument +// is handled as a regular plain path +func Join(fs FileSystem, elems ...string) string { + for i := 0; i < len(elems); i++ { + if elems[i] == "" { + elems = append(elems[:i], elems[i+1:]...) + } + } + return Trim(fs, strings.Join(elems, PathSeparatorString)) +} + +// Clean returns the shortest path name equivalent to path +// by purely lexical processing. It applies the following rules +// iteratively until no further processing can be done: +// +// 1. Replace multiple path separators with a single one. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// The returned path ends in a slash only if it is the root "/". +// +// If the result of this process is an empty string, Clean +// returns the string ".". +// If a FileSystem is given, the file systems volume. +// handling is applied, otherwise the path argument +// is handled as a regular plain path +func Clean(fs FileSystem, p string) string { + vol := "" + if fs != nil { + p = fs.Normalize(p) + vol = fs.VolumeName(p) + } + return vol + path.Clean(p[len(vol):]) +} + +// Dir returns the path's directory dropping the final element +// after removing trailing Separators, Dir does not call Clean on the path. +// If the path is empty, Dir returns "." or "/" for a rooted path. +// If the path consists entirely of Separators, Dir2 returns a single Separator. +// The returned path does not end in a Separator unless it is the root directory. +// This function is the counterpart of Base +// Base("a/b/")="b" and Dir("a/b/") = "a". +// If a FileSystem is given, the file systems volume. +// handling is applied, otherwise the path argument +// is handled as a regular plain path +func Dir(fs FileSystem, path string) string { + def := "." + vol := "" + if fs != nil { + vol, path = SplitVolume(fs, path) + } + i := len(path) - 1 + for i > 0 && IsPathSeparator(path[i]) { + i-- + } + for i >= 0 && !IsPathSeparator(path[i]) { + i-- + } + for i > 0 && IsPathSeparator(path[i]) { + def = PathSeparatorString + i-- + } + path = path[0 : i+1] + if path == "" { + path = def + } + return vol + path +} + +// Base extracts the last path component. +// For the root path it returns the root name, +// For an empty path . is returned +// If a FileSystem is given, the file systems volume. +// handling is applied, otherwise the path argument +// is handled as a regular plain path +func Base(fs FileSystem, path string) string { + if fs != nil { + _, path = SplitVolume(fs, path) + } + i := len(path) - 1 + for i > 0 && IsPathSeparator(path[i]) { + i-- + } + j := i + for j >= 0 && !IsPathSeparator(path[j]) { + j-- + } + path = path[j+1 : i+1] + if path == "" { + if j == 0 { + return PathSeparatorString + } + return "." + } + return path +} + +// Trim eliminates trailing slashes from a path name. +// An empty path is unchanged. 
+// If a FileSystem is given, the file systems volume +// handling is applied, otherwise the path argument +// is handled as a regular plain path +func Trim(fs FileSystem, path string) string { + vol := "" + if fs != nil { + path = fs.Normalize(path) + vol = fs.VolumeName(path) + } + i := len(path) - 1 + for i > len(vol) && IsPathSeparator(path[i]) { + i-- + } + k := i + 1 + path = path[:k] + for i >= len(vol) { + j := i + for j >= len(vol) && IsPathSeparator(path[j]) { + j-- + } + if i != j { + if path[i+1:k] == "." { + if j < len(vol) && k == len(path) { + j++ // keep starting separator instead of trailing one, because this does not exist + } + i = k + } + path = path[:j+1] + path[i:] + i = j + k = i + 1 + } + i-- + } + if k < len(path) && path[len(vol):k] == "." { + path = path[:len(vol)] + path[k+1:] + } + + return path +} + +// IsAbs return true if the given path is an absolute one +// starting with a Separator or is quailified by a volume name. +func IsAbs(fs FileSystem, path string) bool { + _, path = SplitVolume(fs, path) + return strings.HasPrefix(path, PathSeparatorString) +} + +// IsRoot determines whether a given path is a root path. +// This might be the separator or the separator preceded by +// a volume name. +func IsRoot(fs FileSystem, path string) bool { + _, path = SplitVolume(fs, path) + return path == PathSeparatorString +} + +func SplitVolume(fs FileSystem, path string) (string, string) { + path = fs.Normalize(path) + vol := fs.VolumeName(path) + return vol, path[len(vol):] +} + +// Canonical returns the canonical absolute path of a file. +// If exist=false the denoted file must not exist, but +// then the part of the initial path referring to a not existing +// directory structure is lexically resolved (like Clean) and +// does not consider potential symbolic links that might occur +// if the file is finally created in the future. +func Canonical(fs FileSystem, path string, exist bool) (string, error) { + if !IsAbs(fs, path) { + wd, err := fs.Getwd() + if err != nil { + return "", err + } + path = Join(fs, wd, path) + } + return evalPath(fs, path, exist) +} + +// EvalSymlinks resolves all symbolic links in a path +// and returns a path not containing any symbolic link +// anymore. It does not call Clean on a non-canonical path, +// so the result always denotes the same file than the original path. +// If the given path is a relative one, a +// relative one is returned as long as there is no +// absolute symbolic link. It may contain `..`, if it is above +// the current working directory +func EvalSymlinks(fs FileSystem, path string) (string, error) { + return evalPath(fs, path, false) +} + +// Abs returns an absolute representation of path. +// If the path is not absolute it will be joined with the current +// working directory to turn it into an absolute path. The absolute +// path name for a given file is not guaranteed to be unique. +// Symbolic links in the given path will be resolved, but not in +// the current working directory, if used to make the path absolute. +// The denoted file may not exist. +// Abs never calls Clean on the result, so the resulting path +// will denote the same file as the argument. 
+func Abs(fs FileSystem, path string) (string, error) { + path, err := evalPath(fs, path, false) + if err != nil { + return "", err + } + if IsAbs(fs, path) { + return path, nil + } + p, err := fs.Getwd() + if err != nil { + return "", err + } + return Join(fs, p, path), nil +} + +// Split splits path immediately following the final Separator, +// separating it into a directory and file name component. +// If there is no Separator in path, Split returns an empty dir +// and file set to path. In contrast to filepath.Split the directory +// path does not end with a trailing Separator, so Split can +// subsequently called for the directory part, again. +func Split(fs FileSystem, path string) (dir, file string) { + path = fs.Normalize(path) + vol := fs.VolumeName(path) + i := len(path) - 1 + for i >= len(vol) && !IsPathSeparator(path[i]) { + i-- + } + j := i + for j > len(vol) && IsPathSeparator(path[j]) { + j-- + } + return path[:j+1], path[i+1:] +} + +// SplitPath splits a path into a volume and an array of the path segments +func SplitPath(fs FileSystem, path string) (string, []string, bool) { + vol, path := SplitVolume(fs, path) + rest := path + elems := []string{} + for rest != "" { + i := 0 + for i < len(rest) && IsPathSeparator(rest[i]) { + i++ + } + j := i + for j < len(rest) && !IsPathSeparator(rest[j]) { + j++ + } + b := rest[i:j] + rest = rest[j:] + if b == "." || b == "" { + continue + } + elems = append(elems, b) + } + return vol, elems, strings.HasPrefix(path, PathSeparatorString) +} + +func Exists_(err error) bool { + return err == nil || !os.IsNotExist(err) +} + +// Exists checks if a file or directory exists. +func Exists(fs FileSystem, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs FileSystem, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// FileExists checks if a path exists and is a regular file. +func FileExists(fs FileSystem, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.Mode()&os.ModeType == 0 { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// IsDir checks if a given path is a directory. +func IsDir(fs FileSystem, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +// IsFile checks if a given path is a file. +func IsFile(fs FileSystem, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.Mode()&os.ModeType == 0, nil +} + +func CopyFile(srcfs FileSystem, src string, dstfs FileSystem, dst string) error { + + fi, err := srcfs.Lstat(src) + if err != nil { + return err + } + if !fi.Mode().IsRegular() { + return errors.New("no regular file") + } + s, err := srcfs.Open(src) + if err != nil { + return err + } + defer s.Close() + + d, err := dstfs.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, fi.Mode()&os.ModePerm) + if err != nil { + return err + } + defer d.Close() + + _, err = io.Copy(d, s) + if err != nil { + return err + } + return dstfs.Chmod(dst, fi.Mode()) +} + +// CopyDir recursively copies a directory tree, attempting to preserve permissions. +// Source directory must exist, destination directory may exist. 
+// Symlinks are ignored and skipped. +func CopyDir(srcfs FileSystem, src string, dstfs FileSystem, dst string) error { + src = Trim(srcfs, src) + dst = Trim(dstfs, dst) + + si, err := srcfs.Stat(src) + if err != nil { + return err + } + if !si.IsDir() { + return NewPathError("CopyDir", src, ErrNotDir) + } + + di, err := dstfs.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil && !di.IsDir() { + return NewPathError("CopyDir", dst, ErrNotDir) + } + + err = dstfs.MkdirAll(dst, si.Mode()) + if err != nil { + return err + } + + entries, err := ReadDir(srcfs, src) + if err != nil { + return err + } + + for _, entry := range entries { + srcPath := Join(srcfs, src, entry.Name()) + dstPath := Join(dstfs, dst, entry.Name()) + + if entry.IsDir() { + err = CopyDir(srcfs, srcPath, dstfs, dstPath) + } else { + // Skip symlinks. + if entry.Mode()&os.ModeSymlink != 0 { + var old string + old, err = srcfs.Readlink(srcPath) + if err == nil { + err = dstfs.Symlink(old, dstPath) + } + if err == nil { + err = os.Chmod(dst, entry.Mode()) + } + } else { + err = CopyFile(srcfs, srcPath, dstfs, dstPath) + } + } + if err != nil { + return err + } + } + return nil +} + +func Touch(fs FileSystem, path string, perm os.FileMode) error { + file, err := fs.OpenFile(path, os.O_CREATE, perm&os.ModePerm) + if err != nil { + return err + } + file.Close() + return nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func ReadFile(fs FileSystem, path string) ([]byte, error) { + file, err := fs.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + var buf bytes.Buffer + if _, err := io.Copy(&buf, file); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm +// (before umask); otherwise WriteFile truncates it before writing. +func WriteFile(fs FileSystem, filename string, data []byte, mode os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// ReadDir reads the directory named by path and returns +// a list of directory entries sorted by filename. +func ReadDir(fs FileSystem, path string) ([]os.FileInfo, error) { + f, err := fs.Open(path) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() }) + return list, nil +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/vfs.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/vfs.go new file mode 100644 index 0000000..40543b0 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/vfs.go @@ -0,0 +1,130 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package vfs + +import ( + "os" +) + +type vfs struct { + FileSystem +} + +func New(fs FileSystem) VFS { + if v, ok := fs.(VFS); ok { + return v + } + return &vfs{fs} +} + +func (fs *vfs) Join(elems ...string) string { + return Join(fs, elems...) +} + +func (fs *vfs) Split(path string) (string, string) { + return Split(fs, path) +} + +func (fs *vfs) Base(path string) string { + return Base(fs, path) +} + +func (fs *vfs) Dir(path string) string { + return Dir(fs, path) +} + +func (fs *vfs) Clean(path string) string { + return Clean(fs, path) +} + +func (fs *vfs) Trim(path string) string { + return Trim(fs, path) +} + +func (fs *vfs) IsAbs(path string) bool { + return IsAbs(fs, path) +} + +func (fs *vfs) IsRoot(path string) bool { + return IsRoot(fs, path) +} + +func (fs *vfs) SplitVolume(path string) (string, string) { + return SplitVolume(fs, path) +} + +func (fs *vfs) SplitPath(path string) (vol string, elems []string, rooted bool) { + return SplitPath(fs, path) +} + +func (fs *vfs) Canonical(path string, exist bool) (string, error) { + return Canonical(fs, path, exist) +} + +func (fs *vfs) Abs(path string) (string, error) { + return Abs(fs, path) +} + +func (fs *vfs) EvalSymlinks(path string) (string, error) { + return EvalSymlinks(fs, path) +} + +func (fs *vfs) Walk(path string, fn WalkFunc) error { + return Walk(fs, path, fn) +} + +func (fs *vfs) Exists(path string) (bool, error) { + return Exists(fs, path) +} + +func (fs *vfs) DirExists(path string) (bool, error) { + return DirExists(fs, path) +} + +func (fs *vfs) FileExists(path string) (bool, error) { + return FileExists(fs, path) +} + +func (fs *vfs) IsDir(path string) (bool, error) { + return IsDir(fs, path) +} + +func (fs *vfs) IsFile(path string) (bool, error) { + return IsFile(fs, path) +} + +func (fs *vfs) ReadFile(path string) ([]byte, error) { + return ReadFile(fs, path) +} + +func (fs *vfs) WriteFile(path string, data []byte, mode os.FileMode) error { + return WriteFile(fs, path, data, mode) +} + +func (fs *vfs) ReadDir(path string) ([]os.FileInfo, error) { + return ReadDir(fs, path) +} + +func (fs *vfs) TempFile(dir, prefix string) (File, error) { + return TempFile(fs, dir, prefix) +} + +func (fs *vfs) TempDir(dir, prefix string) (string, error) { + return TempDir(fs, dir, prefix) +} diff --git a/vendor/github.com/mandelsoft/vfs/pkg/vfs/walk.go b/vendor/github.com/mandelsoft/vfs/pkg/vfs/walk.go new file mode 100644 index 0000000..491c3ac --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/pkg/vfs/walk.go @@ -0,0 +1,88 @@ +/* + * Copyright 2020 Mandelsoft. All rights reserved. + * This file is licensed under the Apache Software License, v. 2 except as noted + * otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package vfs + +import ( + "os" + fp "path/filepath" + "sort" + + "github.com/mandelsoft/filepath/pkg/filepath" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs FileSystem, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// adapted from https://golang.org/src/path/filepath/path.go +func walkFS(fs FileSystem, path string, info os.FileInfo, err error, walkFn WalkFunc) error { + err = walkFn(path, info, err) + if err != nil { + return err + } + + if info == nil || !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + err := walkFn(path, info, err) + if err == fp.SkipDir { + return nil + } + return err + } + + for _, name := range names { + filename := Join(fs, path, name) + fileInfo, err := fs.Lstat(filename) + + err = walkFS(fs, filename, fileInfo, err, walkFn) + if err != nil { + if err == SkipDir { + return nil + } + return err + } + } + return nil +} + +type WalkFunc = filepath.WalkFunc + +var SkipDir = filepath.SkipDir + +func Walk(fs FileSystem, root string, walkFn WalkFunc) error { + info, err := fs.Lstat(root) + return walkFS(fs, root, info, err, walkFn) +} diff --git a/vendor/github.com/mandelsoft/vfs/test/pkg b/vendor/github.com/mandelsoft/vfs/test/pkg new file mode 120000 index 0000000..4394270 --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/test/pkg @@ -0,0 +1 @@ +../pkg \ No newline at end of file diff --git a/vendor/github.com/mandelsoft/vfs/test/sub/d/sub b/vendor/github.com/mandelsoft/vfs/test/sub/d/sub new file mode 120000 index 0000000..b1f01da --- /dev/null +++ b/vendor/github.com/mandelsoft/vfs/test/sub/d/sub @@ -0,0 +1 @@ +../../sub \ No newline at end of file diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore new file mode 100644 index 0000000..fa81aa9 --- /dev/null +++ b/vendor/github.com/nxadm/tail/.gitignore @@ -0,0 +1,2 @@ +.idea/ +.test/ \ No newline at end of file diff --git a/vendor/github.com/nxadm/tail/.travis.yml b/vendor/github.com/nxadm/tail/.travis.yml new file mode 100644 index 0000000..95dd3bd --- /dev/null +++ b/vendor/github.com/nxadm/tail/.travis.yml @@ -0,0 +1,16 @@ +language: go + +script: + - go test -race -v ./... + +go: + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - "1.13" + - tip + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md new file mode 100644 index 0000000..ef1b5fb --- /dev/null +++ b/vendor/github.com/nxadm/tail/CHANGES.md @@ -0,0 +1,46 @@ +# Version v1.4.4 + +* Fix of checksum problem because of forced tag. No changes to the code. + +# Version v1.4.1 + +* Incorporated PR 162 by by Mohammed902: "Simplify non-Windows build tag". + +# Version v1.4.0 + +* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail". 
+ +# Version v1.3.1 + +* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer", +fixes upstream issue 93. + + +# Version v1.3.0 + +* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num +to Line struct". + +# Version v1.2.1 + +* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able +code in readme". +* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change +to comment wording". +* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed +spurious newlines from log messages". + +# Version v1.2.0 + +* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the + problem for never return the last line if it's not followed by a newline". +* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove +deprecated os.SEEK consts". The changes bumped the minimal supported Go +release to 1.9. + +# Version v1.1.0 + +* migration to go modules. +* release of master branch of the dormant upstream, because it contains +fixes and improvement no present in the tagged release. + diff --git a/vendor/github.com/nxadm/tail/Dockerfile b/vendor/github.com/nxadm/tail/Dockerfile new file mode 100644 index 0000000..d963389 --- /dev/null +++ b/vendor/github.com/nxadm/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/ +ADD . $GOPATH/src/github.com/nxadm/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/nxadm/tail + +# expecting to run the test successfully. +RUN go test -v github.com/nxadm/tail + +# expecting to install successfully +RUN go install -v github.com/nxadm/tail +RUN go install -v github.com/nxadm/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/nxadm/tail/LICENSE b/vendor/github.com/nxadm/tail/LICENSE new file mode 100644 index 0000000..818d802 --- /dev/null +++ b/vendor/github.com/nxadm/tail/LICENSE @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
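The hunks that follow vendor the `nxadm/tail` sources themselves (README, `tail.go`, and the `watch` and `ratelimiter` sub-packages). As a quick orientation, here is a minimal sketch of how the tailing API defined in `tail.go` further down in this diff is consumed; the log file path is only an illustrative assumption, and the final `Wait` call follows the `TailFile` doc comment, which asks callers to check for tailing errors after draining the `Lines` channel.

```go
package main

import (
	"fmt"
	"log"

	"github.com/nxadm/tail"
)

func main() {
	// Follow the file like `tail -F`: keep waiting for new lines and reopen
	// the file when it is rotated. TailFile rejects ReOpen without Follow,
	// so the two flags are set together here.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Lines delivers *tail.Line values carrying the text, the line number
	// and the seek offset at which the line was read.
	for line := range t.Lines {
		fmt.Println(line.Num, line.Text)
	}

	// Errors that occurred while tailing are reported via Wait (or Err)
	// once reading from the Lines channel is finished.
	if err := t.Wait(); err != nil {
		log.Fatal(err)
	}
}
```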
diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md new file mode 100644 index 0000000..dbb6c17 --- /dev/null +++ b/vendor/github.com/nxadm/tail/README.md @@ -0,0 +1,36 @@ +[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail) + +This is repo is forked from the dormant upstream repo at +[hpcloud](https://github.com/hpcloud/tail). This fork adds support for go +modules, updates the dependencies, adds features and fixes bugs. Go 1.9 is +the oldest compiler release supported. + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +if err != nil { + panic(err) +} + +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](http://godoc.org/github.com/nxadm/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. + +## Installing + + go get github.com/nxadm/tail/... + +## Windows support + +This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/nxadm/tail/appveyor.yml b/vendor/github.com/nxadm/tail/appveyor.yml new file mode 100644 index 0000000..e149bc6 --- /dev/null +++ b/vendor/github.com/nxadm/tail/appveyor.yml @@ -0,0 +1,11 @@ +version: 0.{build} +skip_tags: true +cache: C:\Users\appveyor\AppData\Local\NuGet\Cache +build_script: +- SET GOPATH=c:\workspace +- go test -v -race ./... +test: off +clone_folder: c:\workspace\src\github.com\nxadm\tail +branches: + only: + - master diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod new file mode 100644 index 0000000..fb10d42 --- /dev/null +++ b/vendor/github.com/nxadm/tail/go.mod @@ -0,0 +1,9 @@ +module github.com/nxadm/tail + +go 1.13 + +require ( + github.com/fsnotify/fsnotify v1.4.7 + golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 +) diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum new file mode 100644 index 0000000..b391f19 --- /dev/null +++ b/vendor/github.com/nxadm/tail/go.sum @@ -0,0 +1,6 @@ +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/vendor/github.com/nxadm/tail/ratelimiter/Licence b/vendor/github.com/nxadm/tail/ratelimiter/Licence new file mode 100644 index 0000000..434aab1 --- /dev/null +++ b/vendor/github.com/nxadm/tail/ratelimiter/Licence @@ -0,0 +1,7 @@ +Copyright (C) 2013 99designs + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit 
persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go new file mode 100644 index 0000000..358b69e --- /dev/null +++ b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go @@ -0,0 +1,97 @@ +// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. +package ratelimiter + +import ( + "time" +) + +type LeakyBucket struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time + Now func() time.Time +} + +func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { + bucket := LeakyBucket{ + Size: size, + Fill: 0, + LeakInterval: leakInterval, + Now: time.Now, + Lastupdate: time.Now(), + } + + return &bucket +} + +func (b *LeakyBucket) updateFill() { + now := b.Now() + if b.Fill > 0 { + elapsed := now.Sub(b.Lastupdate) + + b.Fill -= float64(elapsed) / float64(b.LeakInterval) + if b.Fill < 0 { + b.Fill = 0 + } + } + b.Lastupdate = now +} + +func (b *LeakyBucket) Pour(amount uint16) bool { + b.updateFill() + + var newfill float64 = b.Fill + float64(amount) + + if newfill > float64(b.Size) { + return false + } + + b.Fill = newfill + + return true +} + +// The time at which this bucket will be completely drained +func (b *LeakyBucket) DrainedAt() time.Time { + return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) +} + +// The duration until this bucket is completely drained +func (b *LeakyBucket) TimeToDrain() time.Duration { + return b.DrainedAt().Sub(b.Now()) +} + +func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { + return b.Now().Sub(b.Lastupdate) +} + +type LeakyBucketSer struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time +} + +func (b *LeakyBucket) Serialise() *LeakyBucketSer { + bucket := LeakyBucketSer{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + } + + return &bucket +} + +func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { + bucket := LeakyBucket{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + Now: time.Now, + } + + return &bucket +} diff --git a/vendor/github.com/nxadm/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go new file mode 100644 index 0000000..bf3c213 --- /dev/null +++ b/vendor/github.com/nxadm/tail/ratelimiter/memory.go @@ -0,0 +1,60 @@ +package ratelimiter + +import ( + "errors" + "time" +) + +const ( + GC_SIZE int = 100 + GC_PERIOD time.Duration = 60 * time.Second +) + +type Memory struct { + store map[string]LeakyBucket + lastGCCollected time.Time +} + +func NewMemory() *Memory { + m := new(Memory) + m.store = make(map[string]LeakyBucket) + 
m.lastGCCollected = time.Now() + return m +} + +func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { + + bucket, ok := m.store[key] + if !ok { + return nil, errors.New("miss") + } + + return &bucket, nil +} + +func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error { + + if len(m.store) > GC_SIZE { + m.GarbageCollect() + } + + m.store[key] = bucket + + return nil +} + +func (m *Memory) GarbageCollect() { + now := time.Now() + + // rate limit GC to once per minute + if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() { + for key, bucket := range m.store { + // if the bucket is drained, then GC + if bucket.DrainedAt().Unix() < now.Unix() { + delete(m.store, key) + } + } + + m.lastGCCollected = now + } +} diff --git a/vendor/github.com/nxadm/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go new file mode 100644 index 0000000..89b2fe8 --- /dev/null +++ b/vendor/github.com/nxadm/tail/ratelimiter/storage.go @@ -0,0 +1,6 @@ +package ratelimiter + +type Storage interface { + GetBucketFor(string) (*LeakyBucket, error) + SetBucketFor(string, LeakyBucket) error +} diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go new file mode 100644 index 0000000..58d3c4b --- /dev/null +++ b/vendor/github.com/nxadm/tail/tail.go @@ -0,0 +1,440 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package tail + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "sync" + "time" + + "github.com/nxadm/tail/ratelimiter" + "github.com/nxadm/tail/util" + "github.com/nxadm/tail/watch" + "gopkg.in/tomb.v1" +) + +var ( + ErrStop = errors.New("tail should now stop") +) + +type Line struct { + Text string + Num int + SeekInfo SeekInfo + Time time.Time + Err error // Error from tail +} + +// NewLine returns a Line with present time. +func NewLine(text string, lineNum int) *Line { + return &Line{text, lineNum, SeekInfo{}, time.Now(), nil} +} + +// SeekInfo represents arguments to `io.Seek` +type SeekInfo struct { + Offset int64 + Whence int // io.Seek* +} + +type logger interface { + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + Fatalln(v ...interface{}) + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) + Panicln(v ...interface{}) + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// Config is used to specify how a file must be tailed. 
+type Config struct { + // File-specifc + Location *SeekInfo // Seek to this location before tailing + ReOpen bool // Reopen recreated files (tail -F) + MustExist bool // Fail early if the file does not exist + Poll bool // Poll for file changes instead of using inotify + Pipe bool // Is a named pipe (mkfifo) + RateLimiter *ratelimiter.LeakyBucket + + // Generic IO + Follow bool // Continue looking for new lines (tail -f) + MaxLineSize int // If non-zero, split longer lines into multiple lines + + // Logger, when nil, is set to tail.DefaultLogger + // To disable logging: set field to tail.DiscardingLogger + Logger logger +} + +type Tail struct { + Filename string + Lines chan *Line + Config + + file *os.File + reader *bufio.Reader + lineNum int + + watcher watch.FileWatcher + changes *watch.FileChanges + + tomb.Tomb // provides: Done, Kill, Dying + + lk sync.Mutex +} + +var ( + // DefaultLogger is used when Config.Logger == nil + DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) + // DiscardingLogger can be used to disable logging output + DiscardingLogger = log.New(ioutil.Discard, "", 0) +) + +// TailFile begins tailing the file. Output stream is made available +// via the `Tail.Lines` channel. To handle errors during tailing, +// invoke the `Wait` or `Err` method after finishing reading from the +// `Lines` channel. +func TailFile(filename string, config Config) (*Tail, error) { + if config.ReOpen && !config.Follow { + util.Fatal("cannot set ReOpen without Follow.") + } + + t := &Tail{ + Filename: filename, + Lines: make(chan *Line), + Config: config, + } + + // when Logger was not specified in config, use default logger + if t.Logger == nil { + t.Logger = DefaultLogger + } + + if t.Poll { + t.watcher = watch.NewPollingFileWatcher(filename) + } else { + t.watcher = watch.NewInotifyFileWatcher(filename) + } + + if t.MustExist { + var err error + t.file, err = OpenFile(t.Filename) + if err != nil { + return nil, err + } + } + + go t.tailFileSync() + + return t, nil +} + +// Tell returns the file's current position, like stdio's ftell(). +// But this value is not very accurate. +// One line from the chan(tail.Lines) may have been read, +// so it may have lost one line. +func (tail *Tail) Tell() (offset int64, err error) { + if tail.file == nil { + return + } + offset, err = tail.file.Seek(0, io.SeekCurrent) + if err != nil { + return + } + + tail.lk.Lock() + defer tail.lk.Unlock() + if tail.reader == nil { + return + } + + offset -= int64(tail.reader.Buffered()) + return +} + +// Stop stops the tailing activity. +func (tail *Tail) Stop() error { + tail.Kill(nil) + return tail.Wait() +} + +// StopAtEOF stops tailing as soon as the end of the file is reached. 
+func (tail *Tail) StopAtEOF() error { + tail.Kill(errStopAtEOF) + return tail.Wait() +} + +var errStopAtEOF = errors.New("tail: stop at eof") + +func (tail *Tail) close() { + close(tail.Lines) + tail.closeFile() +} + +func (tail *Tail) closeFile() { + if tail.file != nil { + tail.file.Close() + tail.file = nil + } +} + +func (tail *Tail) reopen() error { + tail.closeFile() + tail.lineNum = 0 + for { + var err error + tail.file, err = OpenFile(tail.Filename) + if err != nil { + if os.IsNotExist(err) { + tail.Logger.Printf("Waiting for %s to appear...", tail.Filename) + if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil { + if err == tomb.ErrDying { + return err + } + return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err) + } + continue + } + return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err) + } + break + } + return nil +} + +func (tail *Tail) readLine() (string, error) { + tail.lk.Lock() + line, err := tail.reader.ReadString('\n') + tail.lk.Unlock() + if err != nil { + // Note ReadString "returns the data read before the error" in + // case of an error, including EOF, so we return it as is. The + // caller is expected to process it if err is EOF. + return line, err + } + + line = strings.TrimRight(line, "\n") + + return line, err +} + +func (tail *Tail) tailFileSync() { + defer tail.Done() + defer tail.close() + + if !tail.MustExist { + // deferred first open. + err := tail.reopen() + if err != nil { + if err != tomb.ErrDying { + tail.Kill(err) + } + return + } + } + + // Seek to requested location on first open of the file. + if tail.Location != nil { + _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) + if err != nil { + tail.Killf("Seek error on %s: %s", tail.Filename, err) + return + } + } + + tail.openReader() + + // Read line by line. + for { + // do not seek in named pipes + if !tail.Pipe { + // grab the position in case we need to back up in the event of a half-line + if _, err := tail.Tell(); err != nil { + tail.Kill(err) + return + } + } + + line, err := tail.readLine() + + // Process `line` even if err is EOF. + if err == nil { + cooloff := !tail.sendLine(line) + if cooloff { + // Wait a second before seeking till the end of + // file when rate limit is reached. + msg := ("Too much log activity; waiting a second before resuming tailing") + offset, _ := tail.Tell() + tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)} + select { + case <-time.After(time.Second): + case <-tail.Dying(): + return + } + if err := tail.seekEnd(); err != nil { + tail.Kill(err) + return + } + } + } else if err == io.EOF { + if !tail.Follow { + if line != "" { + tail.sendLine(line) + } + return + } + + if tail.Follow && line != "" { + tail.sendLine(line) + if err := tail.seekEnd(); err != nil { + tail.Kill(err) + return + } + } + + // When EOF is reached, wait for more data to become + // available. Wait strategy is based on the `tail.watcher` + // implementation (inotify or polling). + err := tail.waitForChanges() + if err != nil { + if err != ErrStop { + tail.Kill(err) + } + return + } + } else { + // non-EOF error + tail.Killf("Error reading %s: %s", tail.Filename, err) + return + } + + select { + case <-tail.Dying(): + if tail.Err() == errStopAtEOF { + continue + } + return + default: + } + } +} + +// waitForChanges waits until the file has been appended, deleted, +// moved or truncated. When moved or deleted - the file will be +// reopened if ReOpen is true. 
Truncated files are always reopened. +func (tail *Tail) waitForChanges() error { + if tail.changes == nil { + pos, err := tail.file.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos) + if err != nil { + return err + } + } + + select { + case <-tail.changes.Modified: + return nil + case <-tail.changes.Deleted: + tail.changes = nil + if tail.ReOpen { + // XXX: we must not log from a library. + tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened %s", tail.Filename) + tail.openReader() + return nil + } + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop + case <-tail.changes.Truncated: + // Always reopen truncated files (Follow is true) + tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename) + tail.openReader() + return nil + case <-tail.Dying(): + return ErrStop + } +} + +func (tail *Tail) openReader() { + tail.lk.Lock() + if tail.MaxLineSize > 0 { + // add 2 to account for newline characters + tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) + } else { + tail.reader = bufio.NewReader(tail.file) + } + tail.lk.Unlock() +} + +func (tail *Tail) seekEnd() error { + return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd}) +} + +func (tail *Tail) seekTo(pos SeekInfo) error { + _, err := tail.file.Seek(pos.Offset, pos.Whence) + if err != nil { + return fmt.Errorf("Seek error on %s: %s", tail.Filename, err) + } + // Reset the read buffer whenever the file is re-seek'ed + tail.reader.Reset(tail.file) + return nil +} + +// sendLine sends the line(s) to Lines channel, splitting longer lines +// if necessary. Return false if rate limit is reached. +func (tail *Tail) sendLine(line string) bool { + now := time.Now() + lines := []string{line} + + // Split longer lines + if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize { + lines = util.PartitionString(line, tail.MaxLineSize) + } + + for _, line := range lines { + tail.lineNum++ + offset, _ := tail.Tell() + select { + case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}: + case <-tail.Dying(): + return true + } + } + + if tail.Config.RateLimiter != nil { + ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) + if !ok { + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.", + tail.Filename) + return false + } + } + + return true +} + +// Cleanup removes inotify watches added by the tail package. This function is +// meant to be invoked from a process's exit handler. Linux kernel may not +// automatically remove inotify watches after the process exits. 
+func (tail *Tail) Cleanup() { + watch.Cleanup(tail.Filename) +} diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go new file mode 100644 index 0000000..1b94520 --- /dev/null +++ b/vendor/github.com/nxadm/tail/tail_posix.go @@ -0,0 +1,11 @@ +// +build !windows + +package tail + +import ( + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return os.Open(name) +} diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go new file mode 100644 index 0000000..4aaceea --- /dev/null +++ b/vendor/github.com/nxadm/tail/tail_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package tail + +import ( + "github.com/nxadm/tail/winfile" + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return winfile.OpenFile(name, os.O_RDONLY, 0) +} diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go new file mode 100644 index 0000000..2ba0ed7 --- /dev/null +++ b/vendor/github.com/nxadm/tail/util/util.go @@ -0,0 +1,48 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package util + +import ( + "fmt" + "log" + "os" + "runtime/debug" +) + +type Logger struct { + *log.Logger +} + +var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} + +// fatal is like panic except it displays only the current goroutine's stack. +func Fatal(format string, v ...interface{}) { + // https://github.com/nxadm/log/blob/master/log.go#L45 + LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) + os.Exit(1) +} + +// partitionString partitions the string into chunks of given size, +// with the last chunk of variable size. +func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go new file mode 100644 index 0000000..f80aead --- /dev/null +++ b/vendor/github.com/nxadm/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one). 
+func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go new file mode 100644 index 0000000..4399218 --- /dev/null +++ b/vendor/github.com/nxadm/tail/watch/inotify.go @@ -0,0 +1,135 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/nxadm/tail/util" + + "github.com/fsnotify/fsnotify" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. +type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. + return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + RemoveWatch(fw.Filename) + return + } + case <-t.Dying(): + RemoveWatch(fw.Filename) + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + RemoveWatch(fw.Filename) + changes.NotifyDeleted() + return + + //With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod) + case evt.Op&fsnotify.Chmod == fsnotify.Chmod: + fallthrough + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + RemoveWatch(fw.Filename) + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go new file mode 100644 index 0000000..a94bcd4 --- /dev/null +++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go @@ -0,0 +1,248 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
+ +package watch + +import ( + "log" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/nxadm/tail/util" + + "github.com/fsnotify/fsnotify" +) + +type InotifyTracker struct { + mux sync.Mutex + watcher *fsnotify.Watcher + chans map[string]chan fsnotify.Event + done map[string]chan bool + watchNums map[string]int + watch chan *watchInfo + remove chan *watchInfo + error chan error +} + +type watchInfo struct { + op fsnotify.Op + fname string +} + +func (this *watchInfo) isCreate() bool { + return this.op == fsnotify.Create +} + +var ( + // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used + shared *InotifyTracker + + // these are used to ensure the shared InotifyTracker is run exactly once + once = sync.Once{} + goRun = func() { + shared = &InotifyTracker{ + mux: sync.Mutex{}, + chans: make(map[string]chan fsnotify.Event), + done: make(map[string]chan bool), + watchNums: make(map[string]int), + watch: make(chan *watchInfo), + remove: make(chan *watchInfo), + error: make(chan error), + } + go shared.run() + } + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +// Watch signals the run goroutine to begin watching the input filename +func Watch(fname string) error { + return watch(&watchInfo{ + fname: fname, + }) +} + +// Watch create signals the run goroutine to begin watching the input filename +// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate +func WatchCreate(fname string) error { + return watch(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func watch(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.watch <- winfo + return <-shared.error +} + +// RemoveWatch signals the run goroutine to remove the watch for the input filename +func RemoveWatch(fname string) error { + return remove(&watchInfo{ + fname: fname, + }) +} + +// RemoveWatch create signals the run goroutine to remove the watch for the input filename +func RemoveWatchCreate(fname string) error { + return remove(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func remove(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.mux.Lock() + done := shared.done[winfo.fname] + if done != nil { + delete(shared.done, winfo.fname) + close(done) + } + shared.mux.Unlock() + + shared.remove <- winfo + return <-shared.error +} + +// Events returns a channel to which FileEvents corresponding to the input filename +// will be sent. This channel will be closed when removeWatch is called on this +// filename. +func Events(fname string) <-chan fsnotify.Event { + shared.mux.Lock() + defer shared.mux.Unlock() + + return shared.chans[fname] +} + +// Cleanup removes the watch for the input filename if necessary. +func Cleanup(fname string) error { + return RemoveWatch(fname) +} + +// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating +// a new Watcher if the previous Watcher was closed. +func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { + shared.mux.Lock() + defer shared.mux.Unlock() + + if shared.chans[winfo.fname] == nil { + shared.chans[winfo.fname] = make(chan fsnotify.Event) + } + if shared.done[winfo.fname] == nil { + shared.done[winfo.fname] = make(chan bool) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. 
+ fname = filepath.Dir(fname) + } + + var err error + // already in inotify watch + if shared.watchNums[fname] == 0 { + err = shared.watcher.Add(fname) + } + if err == nil { + shared.watchNums[fname]++ + } + return err +} + +// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the +// corresponding events channel. +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error { + shared.mux.Lock() + + ch := shared.chans[winfo.fname] + if ch != nil { + delete(shared.chans, winfo.fname) + close(ch) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() + + var err error + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + err = shared.watcher.Remove(fname) + } + + return err +} + +// sendEvent sends the input event to the appropriate Tail. +func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { + name := filepath.Clean(event.Name) + + shared.mux.Lock() + ch := shared.chans[name] + done := shared.done[name] + shared.mux.Unlock() + + if ch != nil && done != nil { + select { + case ch <- event: + case <-done: + } + } +} + +// run starts the goroutine in which the shared struct reads events from its +// Watcher's Event channel and sends the events to the appropriate Tail. +func (shared *InotifyTracker) run() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + util.Fatal("failed to create Watcher") + } + shared.watcher = watcher + + for { + select { + case winfo := <-shared.watch: + shared.error <- shared.addWatch(winfo) + + case winfo := <-shared.remove: + shared.error <- shared.removeWatch(winfo) + + case event, open := <-shared.watcher.Events: + if !open { + return + } + shared.sendEvent(event) + + case err, open := <-shared.watcher.Errors: + if !open { + return + } else if err != nil { + sysErr, ok := err.(*os.SyscallError) + if !ok || sysErr.Err != syscall.EINTR { + logger.Printf("Error in Watcher Error channel: %s", err) + } + } + } + } +} diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go new file mode 100644 index 0000000..fb17069 --- /dev/null +++ b/vendor/github.com/nxadm/tail/watch/polling.go @@ -0,0 +1,118 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "os" + "runtime" + "time" + + "github.com/nxadm/tail/util" + "gopkg.in/tomb.v1" +) + +// PollingFileWatcher polls the file for changes. 
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. replace + // the fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go new file mode 100644 index 0000000..2e1783e --- /dev/null +++ b/vendor/github.com/nxadm/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it modification, + // deletion, renames or truncations. Returned FileChanges group of + // channels will be closed, thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file. 
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go new file mode 100644 index 0000000..aa7e7bc --- /dev/null +++ b/vendor/github.com/nxadm/tail/winfile/winfile.go @@ -0,0 +1,92 @@ +// +build windows + +package winfile + +import ( + "os" + "syscall" + "unsafe" +) + +// issue also described here +//https://codereview.appspot.com/8203043/ + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 +func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 +func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { + r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61 +func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= syscall.S_ISVTX + } + // No mapping for Go's ModeTemporary (plan9 only). 
+ return +} diff --git a/vendor/github.com/subosito/gotenv/.env b/vendor/github.com/subosito/gotenv/.env new file mode 100644 index 0000000..6405eca --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.env @@ -0,0 +1 @@ +HELLO=world diff --git a/vendor/github.com/subosito/gotenv/.env.invalid b/vendor/github.com/subosito/gotenv/.env.invalid new file mode 100644 index 0000000..016d5e0 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.env.invalid @@ -0,0 +1 @@ +lol$wut diff --git a/vendor/github.com/subosito/gotenv/.gitignore b/vendor/github.com/subosito/gotenv/.gitignore new file mode 100644 index 0000000..2b8d456 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.gitignore @@ -0,0 +1,3 @@ +*.test +*.out +annotate.json diff --git a/vendor/github.com/subosito/gotenv/.travis.yml b/vendor/github.com/subosito/gotenv/.travis.yml new file mode 100644 index 0000000..3370d5f --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.x +os: + - linux + - osx +script: + - go test -test.v -coverprofile=coverage.out -covermode=count +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/subosito/gotenv/CHANGELOG.md b/vendor/github.com/subosito/gotenv/CHANGELOG.md new file mode 100644 index 0000000..67f6873 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/CHANGELOG.md @@ -0,0 +1,47 @@ +# Changelog + +## [1.2.0] - 2019-08-03 + +### Added + +- Add `Must` helper to raise an error as panic. It can be used with `Load` and `OverLoad`. +- Add more tests to be 100% coverage. +- Add CHANGELOG +- Add more OS for the test: OSX and Windows + +### Changed + +- Reduce complexity and improve source code for having `A+` score in [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv). +- Updated README with mentions to all available functions + +### Removed + +- Remove `ErrFormat` +- Remove `MustLoad` and `MustOverload`, replaced with `Must` helper. + +## [1.1.1] - 2018-06-05 + +### Changed + +- Replace `os.Getenv` with `os.LookupEnv` to ensure that the environment variable is not set, by [radding](https://github.com/radding) + +## [1.1.0] - 2017-03-20 + +### Added + +- Supports carriage return in env +- Handle files with UTF-8 BOM + +### Changed + +- Whitespace handling + +### Fixed + +- Incorrect variable expansion +- Handling escaped '$' characters + +## [1.0.0] - 2014-10-05 + +First stable release. + diff --git a/vendor/github.com/subosito/gotenv/LICENSE b/vendor/github.com/subosito/gotenv/LICENSE new file mode 100644 index 0000000..f64ccae --- /dev/null +++ b/vendor/github.com/subosito/gotenv/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Alif Rachmawadi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/subosito/gotenv/README.md b/vendor/github.com/subosito/gotenv/README.md new file mode 100644 index 0000000..d610cdf --- /dev/null +++ b/vendor/github.com/subosito/gotenv/README.md @@ -0,0 +1,131 @@ +# gotenv + +[![Build Status](https://travis-ci.org/subosito/gotenv.svg?branch=master)](https://travis-ci.org/subosito/gotenv) +[![Build status](https://ci.appveyor.com/api/projects/status/wb2e075xkfl0m0v2/branch/master?svg=true)](https://ci.appveyor.com/project/subosito/gotenv/branch/master) +[![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv) +[![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv) +[![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv) + +Load environment variables dynamically in Go. + +## Usage + +Put the gotenv package on your `import` statement: + +```go +import "github.com/subosito/gotenv" +``` + +To modify your app environment variables, `gotenv` expose 2 main functions: + +- `gotenv.Load` +- `gotenv.Apply` + +By default, `gotenv.Load` will look for a file called `.env` in the current working directory. + +Behind the scene, it will then load `.env` file and export the valid variables to the environment variables. Make sure you call the method as soon as possible to ensure it loads all variables, say, put it on `init()` function. + +Once loaded you can use `os.Getenv()` to get the value of the variable. + +Let's say you have `.env` file: + +``` +APP_ID=1234567 +APP_SECRET=abcdef +``` + +Here's the example of your app: + +```go +package main + +import ( + "github.com/subosito/gotenv" + "log" + "os" +) + +func init() { + gotenv.Load() +} + +func main() { + log.Println(os.Getenv("APP_ID")) // "1234567" + log.Println(os.Getenv("APP_SECRET")) // "abcdef" +} +``` + +You can also load other than `.env` file if you wish. Just supply filenames when calling `Load()`. It will load them in order and the first value set for a variable will win.: + +```go +gotenv.Load(".env.production", "credentials") +``` + +While `gotenv.Load` loads entries from `.env` file, `gotenv.Apply` allows you to use any `io.Reader`: + +```go +gotenv.Apply(strings.NewReader("APP_ID=1234567")) + +log.Println(os.Getenv("APP_ID")) +// Output: "1234567" +``` + +Both `gotenv.Load` and `gotenv.Apply` **DO NOT** overrides existing environment variables. If you want to override existing ones, you can see section below. 
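To make the loading rules just described concrete, here is a small sketch; the variable names and values are invented for illustration, and the behaviour shown follows the Usage text above and the `setenv` logic in `gotenv.go` later in this diff: values already present in the process environment are kept, and among several applied sources the first occurrence of a key wins.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	// A variable that is already set in the environment is never touched
	// by Load/Apply.
	os.Setenv("PORT", "9000")

	// Apply takes any io.Reader; two sources are applied in order here.
	// Returned errors are ignored for brevity.
	gotenv.Apply(strings.NewReader("PORT=8080\nAPP_ENV=production"))
	gotenv.Apply(strings.NewReader("APP_ENV=staging\nDEBUG=true"))

	fmt.Println(os.Getenv("PORT"))    // 9000       (pre-existing value kept)
	fmt.Println(os.Getenv("APP_ENV")) // production (first source wins)
	fmt.Println(os.Getenv("DEBUG"))   // true
}
```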
+ +### Environment Overrides + +Besides above functions, `gotenv` also provides another functions that overrides existing: + +- `gotenv.OverLoad` +- `gotenv.OverApply` + + +Here's the example of this overrides behavior: + +```go +os.Setenv("HELLO", "world") + +// NOTE: using Apply existing value will be reserved +gotenv.Apply(strings.NewReader("HELLO=universe")) +fmt.Println(os.Getenv("HELLO")) +// Output: "world" + +// NOTE: using OverApply existing value will be overridden +gotenv.OverApply(strings.NewReader("HELLO=universe")) +fmt.Println(os.Getenv("HELLO")) +// Output: "universe" +``` + +### Throw a Panic + +Both `gotenv.Load` and `gotenv.OverLoad` returns an error on something wrong occurred, like your env file is not exist, and so on. To make it easier to use, `gotenv` also provides `gotenv.Must` helper, to let it panic when an error returned. + +```go +err := gotenv.Load(".env-is-not-exist") +fmt.Println("error", err) +// error: open .env-is-not-exist: no such file or directory + +gotenv.Must(gotenv.Load, ".env-is-not-exist") +// it will throw a panic +// panic: open .env-is-not-exist: no such file or directory +``` + +### Another Scenario + +Just in case you want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` function as public API so you can use that. + +```go +// import "strings" + +pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO")) +// gotenv.Env{"FOO": "test", "BAR": "test"} + +err, pairs = gotenv.StrictParse(strings.NewReader(`FOO="bar"`)) +// gotenv.Env{"FOO": "bar"} +``` + +`Parse` ignores invalid lines and returns `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines. + +## Notes + +The gotenv package is a Go port of [`dotenv`](https://github.com/bkeepers/dotenv) project with some additions made for Go. For general features, it aims to be compatible as close as possible. diff --git a/vendor/github.com/subosito/gotenv/appveyor.yml b/vendor/github.com/subosito/gotenv/appveyor.yml new file mode 100644 index 0000000..33b4c40 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/appveyor.yml @@ -0,0 +1,9 @@ +build: off +clone_folder: c:\gopath\src\github.com\subosito\gotenv +environment: + GOPATH: c:\gopath +stack: go 1.10 +before_test: + - go get -t +test_script: + - go test -v -cover -race diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go new file mode 100644 index 0000000..745a344 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/gotenv.go @@ -0,0 +1,265 @@ +// Package gotenv provides functionality to dynamically load the environment variables +package gotenv + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" +) + +const ( + // Pattern for detecting valid line format + linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z` + + // Pattern for detecting valid variable within a value + variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)` +) + +// Env holds key/value pair of valid environment variable +type Env map[string]string + +/* +Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist. +When it's called with no argument, it will load `.env` file on the current path and set the environment variables. +Otherwise, it will loop over the filenames parameter and set the proper environment variables. 
+*/ +func Load(filenames ...string) error { + return loadenv(false, filenames...) +} + +/* +OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables. +*/ +func OverLoad(filenames ...string) error { + return loadenv(true, filenames...) +} + +/* +Must is wrapper function that will panic when supplied function returns an error. +*/ +func Must(fn func(filenames ...string) error, filenames ...string) { + if err := fn(filenames...); err != nil { + panic(err.Error()) + } +} + +/* +Apply is a function to load an io Reader then export the valid variables into environment variables if they do not exist. +*/ +func Apply(r io.Reader) error { + return parset(r, false) +} + +/* +OverApply is a function to load an io Reader then export and override the valid variables into environment variables. +*/ +func OverApply(r io.Reader) error { + return parset(r, true) +} + +func loadenv(override bool, filenames ...string) error { + if len(filenames) == 0 { + filenames = []string{".env"} + } + + for _, filename := range filenames { + f, err := os.Open(filename) + if err != nil { + return err + } + + err = parset(f, override) + if err != nil { + return err + } + + f.Close() + } + + return nil +} + +// parse and set :) +func parset(r io.Reader, override bool) error { + env, err := StrictParse(r) + if err != nil { + return err + } + + for key, val := range env { + setenv(key, val, override) + } + + return nil +} + +func setenv(key, val string, override bool) { + if override { + os.Setenv(key, val) + } else { + if _, present := os.LookupEnv(key); !present { + os.Setenv(key, val) + } + } +} + +// Parse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables. +// It expands the value of a variable from the environment variable but does not set the value to the environment itself. +// This function is skipping any invalid lines and only processing the valid one. +func Parse(r io.Reader) Env { + env, _ := StrictParse(r) + return env +} + +// StrictParse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables. +// It expands the value of a variable from the environment variable but does not set the value to the environment itself. +// This function is returning an error if there are any invalid lines. 
+func StrictParse(r io.Reader) (Env, error) { + env := make(Env) + scanner := bufio.NewScanner(r) + + i := 1 + bom := string([]byte{239, 187, 191}) + + for scanner.Scan() { + line := scanner.Text() + + if i == 1 { + line = strings.TrimPrefix(line, bom) + } + + i++ + + err := parseLine(line, env) + if err != nil { + return env, err + } + } + + return env, nil +} + +func parseLine(s string, env Env) error { + rl := regexp.MustCompile(linePattern) + rm := rl.FindStringSubmatch(s) + + if len(rm) == 0 { + return checkFormat(s, env) + } + + key := rm[1] + val := rm[2] + + // determine if string has quote prefix + hdq := strings.HasPrefix(val, `"`) + + // determine if string has single quote prefix + hsq := strings.HasPrefix(val, `'`) + + // trim whitespace + val = strings.Trim(val, " ") + + // remove quotes '' or "" + rq := regexp.MustCompile(`\A(['"])(.*)(['"])\z`) + val = rq.ReplaceAllString(val, "$2") + + if hdq { + val = strings.Replace(val, `\n`, "\n", -1) + val = strings.Replace(val, `\r`, "\r", -1) + + // Unescape all characters except $ so variables can be escaped properly + re := regexp.MustCompile(`\\([^$])`) + val = re.ReplaceAllString(val, "$1") + } + + rv := regexp.MustCompile(variablePattern) + fv := func(s string) string { + return varReplacement(s, hsq, env) + } + + val = rv.ReplaceAllStringFunc(val, fv) + val = parseVal(val, env) + + env[key] = val + return nil +} + +func parseExport(st string, env Env) error { + if strings.HasPrefix(st, "export") { + vs := strings.SplitN(st, " ", 2) + + if len(vs) > 1 { + if _, ok := env[vs[1]]; !ok { + return fmt.Errorf("line `%s` has an unset variable", st) + } + } + } + + return nil +} + +func varReplacement(s string, hsq bool, env Env) string { + if strings.HasPrefix(s, "\\") { + return strings.TrimPrefix(s, "\\") + } + + if hsq { + return s + } + + sn := `(\$)(\{?([A-Z0-9_]+)\}?)` + rn := regexp.MustCompile(sn) + mn := rn.FindStringSubmatch(s) + + if len(mn) == 0 { + return s + } + + v := mn[3] + + replace, ok := env[v] + if !ok { + replace = os.Getenv(v) + } + + return replace +} + +func checkFormat(s string, env Env) error { + st := strings.TrimSpace(s) + + if (st == "") || strings.HasPrefix(st, "#") { + return nil + } + + if err := parseExport(st, env); err != nil { + return err + } + + return fmt.Errorf("line `%s` doesn't match format", s) +} + +func parseVal(val string, env Env) string { + if strings.Contains(val, "=") { + if !(val == "\n" || val == "\r") { + kv := strings.Split(val, "\n") + + if len(kv) == 1 { + kv = strings.Split(val, "\r") + } + + if len(kv) > 1 { + val = kv[0] + + for i := 1; i < len(kv); i++ { + parseLine(kv[i], env) + } + } + } + } + + return val +} diff --git a/yaml/marshal.go b/yaml/marshal.go index 8bdc763..cbff0aa 100644 --- a/yaml/marshal.go +++ b/yaml/marshal.go @@ -9,6 +9,10 @@ import ( "github.com/mandelsoft/spiff/legacy/candiedyaml" ) +func Marshal(node Node) ([]byte, error) { + return candiedyaml.Marshal(node) +} + func ToJSON(root Node) ([]byte, error) { if root == nil { return ValueToJSON(nil) diff --git a/yaml/parser.go b/yaml/parser.go index 6f210dc..0c0e1bb 100644 --- a/yaml/parser.go +++ b/yaml/parser.go @@ -20,6 +20,10 @@ func (e NonStringKeyError) Error() string { return fmt.Sprintf("map key must be a string: %#v", e.Key) } +func Unmarshal(sourceName string, source []byte) (Node, error) { + return Parse(sourceName, source) +} + func Parse(sourceName string, source []byte) (Node, error) { docs, err := ParseMulti(sourceName, source) if err != nil { @@ -31,6 +35,10 @@ func Parse(sourceName string, 
source []byte) (Node, error) {
 	return docs[0], err
 }
 
+func UnmarshalMulti(sourceName string, source []byte) ([]Node, error) {
+	return ParseMulti(sourceName, source)
+}
+
 func ParseMulti(sourceName string, source []byte) ([]Node, error) {
 	docs := []Node{}