diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 4f1c95d..2607a47 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -8,37 +8,137 @@ "Rev": "f7f79f729e0fbe2fcc061db48a9ba0263f588252" }, { - "ImportPath": "github.com/gorilla/websocket", - "Rev": "c45a635370221f34fea2d5163fd156fcb4e38e8a" + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/miekg/dns", - "Rev": "b9171237b0642de1d8e8004f16869970e065f46b" + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/mitchellh/goamz/aws", - "Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf" + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/mitchellh/goamz/route53", - "Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf" + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/rancher/go-rancher/client", - "Comment": "v0.1.0-143-g77690e2", - "Rev": "77690e2bf83b2c6227dbab555941894f2f76137a" + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/square/go-jose", - "Rev": "7465d2baefd097ef32054b46be7d9c41185869a1" + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + 
"Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/square/go-jose/cipher", - "Rev": "7465d2baefd097ef32054b46be7d9c41185869a1" + "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" }, { - "ImportPath": "github.com/vaughan0/go-ini", - "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1" + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/route53", + "Comment": "v1.1.15", + "Rev": "e7cf1e5986499eea7d4a87868f1eb578c8f2045a" + }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v1.11.0", + "Rev": "12f418cc7edc5a618a51407b7ac1f1f512139df3" + }, + { + "ImportPath": "github.com/gorilla/websocket", + "Rev": "c45a635370221f34fea2d5163fd156fcb4e38e8a" + }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2-12-g0b12d6b", + "Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74" + }, + { + "ImportPath": "github.com/miekg/dns", + "Rev": "b9171237b0642de1d8e8004f16869970e065f46b" + }, + { + "ImportPath": "github.com/rancher/go-rancher/client", + "Comment": "v0.1.0-143-g77690e2", + "Rev": "77690e2bf83b2c6227dbab555941894f2f76137a" }, { "ImportPath": "github.com/weppos/dnsimple-go/dnsimple", @@ -46,8 +146,28 @@ }, { "ImportPath": "github.com/xenolf/lego/acme", - "Comment": "v0.2.0-117-g39eef1c", - "Rev": "39eef1c2f6f8025524aa88746c196ae598aeffc3" + "Comment": "v0.3.0-34-gca19a90", + "Rev": "ca19a90028e242e878585941c2a27c8f3b3efc25" + }, + { + "ImportPath": "github.com/xenolf/lego/providers/dns/cloudflare", + "Comment": "v0.3.0-34-gca19a90", + "Rev": "ca19a90028e242e878585941c2a27c8f3b3efc25" + }, + { + "ImportPath": "github.com/xenolf/lego/providers/dns/digitalocean", + "Comment": "v0.3.0-34-gca19a90", + "Rev": "ca19a90028e242e878585941c2a27c8f3b3efc25" + }, + { + "ImportPath": "github.com/xenolf/lego/providers/dns/dnsimple", + "Comment": "v0.3.0-34-gca19a90", + "Rev": "ca19a90028e242e878585941c2a27c8f3b3efc25" + }, + { + "ImportPath": "github.com/xenolf/lego/providers/dns/route53", + "Comment": "v0.3.0-34-gca19a90", + "Rev": "ca19a90028e242e878585941c2a27c8f3b3efc25" }, { "ImportPath": "golang.org/x/crypto/ocsp", @@ -60,6 +180,21 @@ { "ImportPath": "golang.org/x/sys/unix", "Rev": "eb2c74142fd19a79b3f237334c7384d5167b1b46" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v1", + "Comment": "v1.0.1", + "Rev": "40d457b439244b546f023d056628e5184136899b" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v1/cipher", + "Comment": "v1.0.1", + "Rev": "40d457b439244b546f023d056628e5184136899b" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v1/json", + "Comment": "v1.0.1", + "Rev": "40d457b439244b546f023d056628e5184136899b" } ] } diff --git a/Godeps/_workspace/src/github.com/square/go-jose/LICENSE b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/LICENSE rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 0000000..5f14d11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. 
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 0000000..e50771f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. 
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 0000000..e2d333b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. 
+func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occured", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. 
+func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 0000000..8429470 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 0000000..59fa4a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 0000000..4d2a01e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000..fc38172 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,107 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 0000000..b6432f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 0000000..c8d0564 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 0000000..f664caf --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,68 @@ +package client + +import ( + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + // Set the upper limit of delay in retrying at ~five minutes + minTime := 30 + throttle := d.shouldThrottle(r) + if throttle { + minTime = 1000 + } + + retryCount := r.RetryCount + if retryCount > 13 { + retryCount = 13 + } else if throttle && retryCount > 8 { + retryCount = 8 + } + + delay := (1 << uint(retryCount)) * (rand.Intn(30) + minTime) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns true if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 0000000..4778056 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 0000000..9e83e92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,311 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own default +// number of retries. This will be the default action if Config.MaxRetries +// is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retreive + // credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. 
+ Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This options is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. 
+ // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. 
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 0000000..d6a7b08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 0000000..a054d39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. 
If unable +// to determine request body length and no "Content-Length" was specified it will panic. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
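The io.Seeker branch in BuildContentLengthHandler measures the body without reading it: note the current offset, seek to the end, then seek back. A standalone sketch of that technique using only the standard library:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	body := bytes.NewReader([]byte("hello world"))

	start, _ := body.Seek(0, 1) // remember the current offset
	end, _ := body.Seek(0, 2)   // jump to the end to learn the total size
	body.Seek(start, 0)         // restore the original position

	fmt.Println("Content-Length would be", end-start) // 11
}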
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 0000000..ea07580 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,153 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// There's no validation to be done on the contents of []byte values. Prepare +// to check validateAny arguments against that type so we can quickly skip +// them. +var byteSliceType = reflect.TypeOf([]byte(nil)) + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + if value.Type() == byteSliceType { + // We don't need to validate the contents of []byte. + return + } + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. 
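Parameter validation is driven entirely by struct tags on the input types. A self-contained sketch of the same `required` check; exampleInput is invented here for illustration:

package main

import (
	"fmt"
	"reflect"
)

type exampleInput struct {
	Name *string `required:"true"`
}

func main() {
	v := reflect.ValueOf(exampleInput{}) // Name deliberately left nil

	f := v.Type().Field(0)
	fv := v.Field(0)

	if f.Tag.Get("required") != "" && fv.Kind() == reflect.Ptr && fv.IsNil() {
		fmt.Printf("missing required parameter: %s\n", f.Name)
	}
}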
+func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." + if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. +func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. +func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. + + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 0000000..857311f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. 
+// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 0000000..7b8ebf5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. 
+// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. 
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
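A minimal custom Provider following the pattern described above, embedding Expiry so IsExpired comes for free; the key values and the fifteen-minute lifetime are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// fakeProvider hands out fixed keys and marks them expired after 15 minutes,
// with a 5 minute window so refresh happens before the hard deadline.
type fakeProvider struct {
	credentials.Expiry
}

func (p *fakeProvider) Retrieve() (credentials.Value, error) {
	p.SetExpiration(time.Now().Add(15*time.Minute), 5*time.Minute)
	return credentials.Value{
		AccessKeyID:     "AKID",
		SecretAccessKey: "SECRET",
		ProviderName:    "fakeProvider",
	}, nil
}

func main() {
	creds := credentials.NewCredentials(&fakeProvider{})
	v, err := creds.Get()
	fmt.Println(v.AccessKeyID, err) // AKID <nil>
}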
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 0000000..aa9d689 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. 
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 0000000..96655bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. 
+ // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 0000000..7fc91d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 0000000..7fb7cbf --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. 
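Environment credentials in a small, self-contained sketch; the key values are placeholders and would normally already be set in the environment:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholders only; in real use these come from the process environment.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	fmt.Println(v.ProviderName, v.AccessKeyID, err) // EnvProvider AKID <nil>
}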
+// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. 
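Reading one profile from the shared credentials file; an empty filename falls back to AWS_SHARED_CREDENTIALS_FILE or the default location, and the profile name matches the example.ini above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Empty filename: use $AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials.
	creds := credentials.NewSharedCredentials("", "no_token")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("could not load profile:", err)
		return
	}
	fmt.Println(v.AccessKeyID)
}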
+func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 0000000..71189e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,48 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set pragmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + s.Value.ProviderName = StaticProviderName + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 0000000..043960d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,97 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. 
+package defaults + +import ( + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. 
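A sketch of taking the SDK defaults and overriding a few values before building a client; the region is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	d := defaults.Get()

	// Defaults carry no region unless AWS_REGION was set; override here.
	d.Config.WithRegion("us-west-2").WithMaxRetries(3)

	fmt.Println(aws.StringValue(d.Config.Region)) // us-west-2
}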
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) + + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + }, + }}) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 0000000..669c813 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,140 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshalling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 0000000..5b4379d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,124 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. 
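Typical metadata-client use: probe availability first, then read the region. session.New from the vendored session package is assumed here:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.New())

	if !svc.Available() {
		fmt.Println("not running on EC2, or the metadata service is unreachable")
		return
	}

	region, err := svc.Region()
	fmt.Println(region, err)
}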
+package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + return + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000..5766361 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 0000000..db87188 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. 
This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 0000000..5279c19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,187 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. 
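Routing SDK debug output through a custom writer with LoggerFunc and a debug sub level, as a small sketch:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	cfg := aws.NewConfig().
		WithLogLevel(aws.LogDebugWithHTTPBody).
		WithLogger(aws.LoggerFunc(func(args ...interface{}) {
			fmt.Fprintln(os.Stderr, args...)
		}))

	// Sub levels imply LogDebug, but do not imply each other.
	fmt.Println(cfg.LogLevel.Matches(aws.LogDebug))            // true
	fmt.Println(cfg.LogLevel.Matches(aws.LogDebugWithSigning)) // false
}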
+type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. 
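A sketch of registering a custom named handler on the default handler lists; the handler name and log line are invented for illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	handlers := defaults.Handlers()

	// Log every outgoing request just before the core send handler runs.
	handlers.Send.PushFrontNamed(request.NamedHandler{
		Name: "example.LogBeforeSend",
		Fn: func(r *request.Request) {
			fmt.Println("sending", r.Operation.Name, "to", r.ClientInfo.Endpoint)
		},
	})

	fmt.Println("send handlers registered:", handlers.Send.Len())
}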
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 0000000..da6396d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,49 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.RWMutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close is a thread-safe close. Uses the write lock. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read using a read lock. +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.RLock() + defer o.lock.RUnlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 0000000..a4b7054 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,322 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. 
+// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: nil, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = newOffsetReader(reader, 0) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. 
+// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Error = nil + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request retuning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +func (r *Request) Send() error { + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + var body io.ReadCloser + if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { + body = reader.CloseAndCopy(r.BodyStart) + } else { + if r.Config.Logger != nil { + r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") + } + r.Body.Seek(r.BodyStart, 0) + body = ioutil.NopCloser(r.Body) + } + + r.HTTPRequest = &http.Request{ + URL: r.HTTPRequest.URL, + Header: r.HTTPRequest.Header, + Close: r.HTTPRequest.Close, + Form: r.HTTPRequest.Form, + PostForm: r.HTTPRequest.PostForm, + Body: body, + MultipartForm: r.HTTPRequest.MultipartForm, + Host: r.HTTPRequest.Host, + Method: r.HTTPRequest.Method, + Proto: r.HTTPRequest.Proto, + ContentLength: r.HTTPRequest.ContentLength, + } + // Closing response body. Since we are setting a new request to send off, this + // response will get squashed and leaked. + r.HTTPResponse.Body.Close() + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
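+//
+// Sketch of calling it from a custom Build handler (the handler body is
+// illustrative):
+//
+//     r.Handlers.Build.PushBack(func(r *Request) {
+//         AddToUserAgent(r, "my-integration/0.1")
+//     })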
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 0000000..2939ec4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. 
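+//
+// A usage sketch (the service, input, and output types are illustrative and
+// not part of this package):
+//
+//     req, _ := svc.ListTablesRequest(&dynamodb.ListTablesInput{})
+//     err := req.EachPage(func(data interface{}, lastPage bool) bool {
+//         page := data.(*dynamodb.ListTablesOutput)
+//         fmt.Println("tables on page:", len(page.TableNames))
+//         return true // keep paging
+//     })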
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 0000000..8cc8b01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,101 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +func (r *Request) IsErrorThrottle() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeThrottle(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. 
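+//
+// Sketch of using it from an AfterRetry handler to force a credential refresh
+// before the retry (the handler itself is illustrative):
+//
+//     r.Handlers.AfterRetry.PushBack(func(r *Request) {
+//         if r.IsErrorExpired() {
+//             r.Config.Credentials.Expire()
+//         }
+//     })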
+func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 0000000..47e4536 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,120 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. +// +// Generally this package should be used instead of the `defaults` package. +// +// A session should be used to share configurations and request handlers between multiple +// service clients. When service clients need specific configuration aws.Config can be +// used to provide additional configuration directly to the service client. +package session + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the session concurrently. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided Configs +// on top of the SDK's default configurations. Once the session is created it +// can be mutated to modify Configs or Handlers. The session is safe to be read +// concurrently, but it should not be written to concurrently. +// +// Example: +// // Create a session with the default config and request handlers. +// sess := session.New() +// +// // Create a session with a custom region +// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) +// +// // Create a session, and add additional handlers for all service +// // clients created with the session to inherit. Adds logging handler. +// sess := session.New() +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// // Create a S3 client instance from a session +// sess := session.New() +// svc := s3.New(sess) +func New(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. 
If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2"}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +// +// Example: +// sess := session.New() +// s3.New(sess) +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), serviceName, + aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000..fa014b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,106 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. 
+ GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 0000000..805dc71 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.1.15" diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 0000000..2b279e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. 
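+//
+// For example, given the bundled endpoints map (values shown for illustration):
+//
+//     ep, signingRegion := EndpointForRegion("s3", "us-west-2", false)
+//     // ep == "https://s3-us-west-2.amazonaws.com", signingRegion == ""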
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. +func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 0000000..5f4991c --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,75 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "cn-north-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-gov-west-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com" + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 0000000..e995315 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,88 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "cn-north-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 0000000..53831df --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. 
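+//
+// A minimal sketch (the Input type below is hypothetical):
+//
+//     type Input struct {
+//         ClientToken *string `idempotencyToken:"true"`
+//     }
+//     in := &Input{}
+//     v := reflect.ValueOf(in).Elem()
+//     if CanSetIdempotencyToken(v.Field(0), v.Type().Field(0)) {
+//         SetIdempotencyToken(v.Field(0))
+//     }
+//     // *in.ClientToken now holds a random version 4 UUID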
+func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 0000000..56d69db --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000..60ea0bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
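+//
+// A small sketch (the parameter struct is hypothetical):
+//
+//     name := "users"
+//     params := struct {
+//         TableName *string `type:"string"`
+//     }{TableName: &name}
+//     body := url.Values{}
+//     _ = Parse(body, &params, false)
+//     // body.Encode() == "TableName=users"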
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000..a3ea409 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 0000000..f214296 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 0000000..5f41251 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,256 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' 
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + 
query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000..4366de2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
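+//
+// Sketch with a hypothetical shape (real API shapes are generated code):
+//
+//     type UploadInput struct {
+//         _    struct{}      `payload:"Body"`
+//         Body io.ReadSeeker `type:"blob"`
+//     }
+//     // PayloadType(&UploadInput{}) == "blob"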
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000..2cba1d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,198 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := 
v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 0000000..c74088b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. 
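// Editor's note (illustrative, not upstream text): restxml composes the
// helpers vendored above: rest.Build and rest.Unmarshal handle URI, query and
// header members, xmlutil handles structure payload bodies, and the query
// protocol's error unmarshaler decodes XML error responses. Generated clients
// are assumed to wire the exported handlers onto each request, roughly:
//
//	req.Handlers.Build.PushBackNamed(restxml.BuildHandler)
//	req.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
//	req.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
//	req.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
//
// (PushBackNamed is assumed from the request package's handler list API.)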
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 0000000..da1a681 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
+func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 0000000..ceb4132 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. 
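// Editor's illustration (assumed example, not upstream code): buildStruct
// below walks exported fields and emits one child node per member, named by
// its locationName tag. A hypothetical member tagged
//
//	HostedZoneConfig *struct {
//		_       struct{} `type:"structure"`
//		Comment *string  `locationName:"Comment" type:"string"`
//	} `locationName:"HostedZoneConfig" type:"structure"`
//
// would therefore serialize roughly as
//
//	<HostedZoneConfig><Comment>...</Comment></HostedZoneConfig>
//
// with nil members skipped, so empty optional structures contribute nothing.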
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 0000000..49f291a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
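// Editor's sketch (names assumed, not upstream code): restxml.Unmarshal in
// this vendor drop calls this function with the response body, for example:
//
//	decoder := xml.NewDecoder(httpResp.Body)
//	out := &GetChangeOutput{} // a generated output shape
//	if err := xmlutil.UnmarshalXML(out, decoder, ""); err != nil {
//		// handle SerializationError
//	}
//
// The wrapper argument, when non-empty, names a wrapping element to descend
// into before matching the shape's fields.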
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
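// Editor's note (illustration only): parseList below handles both list
// encodings used by the REST XML protocol. Without the flattened tag the
// items are wrapped in "member" (or locationNameList) elements:
//
//	<Values><member>a</member><member>b</member></Values>
//
// whereas a flattened list is parsed one repeated sibling element at a time:
//
//	<Values>a</Values>
//	<Values>b</Values>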
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 0000000..72c198a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
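// Editor's sketch (illustrative, not upstream code): the node tree built by
// this package can be rendered with the function below, for example:
//
//	var buf bytes.Buffer
//	enc := xml.NewEncoder(&buf)
//	root := NewXMLElement(xml.Name{Local: "Message"})
//	root.AddChild(&XMLNode{Name: xml.Name{Local: "Text"}, Text: "hello"})
//	if err := StructToXML(enc, root, true); err != nil { // sorted=true for stable child order
//		// handle encode error
//	}
//	// buf now holds <Message><Text>hello</Text></Message>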
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go new file mode 100644 index 0000000..244c86d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go new file mode 100644 index 0000000..14b0c66 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go @@ -0,0 +1,438 @@ +// Package v4 implements signing for AWS V4 signer +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + 
"github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Content-Length": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string + notHoist bool + signedHeaderVals http.Header +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + notHoist: req.NotHoist, + } + + req.Error = s.sign() + req.SignedHeaderVals = s.signedHeaderVals +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + + unsignedHeaders := v4.Request.Header + if v4.isPresign { + if !v4.notHoist { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + v4.Query[k] = urlValues[k] + } + } + } + + v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = 
v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + headers = append(headers, lowerCaseKey) + + if v4.signedHeaderVals == nil { + v4.signedHeaderVals = make(http.Header) + } + v4.signedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the 
request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 0000000..b51e944 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. 
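// Editor's sketch (field values assumed, not upstream code): generated
// WaitUntil* helpers construct a Waiter and call Wait, roughly:
//
//	w := waiter.Waiter{
//		Client: svc,   // a generated API client
//		Input:  input, // the operation's input shape
//		Config: waiter.Config{
//			Operation:   "GetChange", // "<Operation>Request" is resolved by reflection
//			Delay:       30,          // seconds between attempts
//			MaxAttempts: 60,
//			Acceptors: []waiter.WaitAcceptor{
//				{State: "success", Matcher: "path", Argument: "ChangeInfo.Status", Expected: "INSYNC"},
//			},
//		},
//	}
//	err := w.Wait()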
+func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/api.go new file mode 100644 index 0000000..04747ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/api.go @@ -0,0 +1,5688 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53 provides a client for Amazon Route 53. +package route53 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssociateVPCWithHostedZone = "AssociateVPCWithHostedZone" + +// AssociateVPCWithHostedZoneRequest generates a request for the AssociateVPCWithHostedZone operation. 
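// Editor's note (usage sketch; client construction is not part of this hunk):
// every generated operation in this file comes in two forms, a *Request
// constructor that returns the request plus its output shape, and a wrapper
// that sends it. Callers can customize the request first:
//
//	req, out := svc.AssociateVPCWithHostedZoneRequest(input)
//	// adjust req (handlers, presigning, ...) if needed, then:
//	err := req.Send() // out is populated on success
//
// or use the one-shot form:
//
//	out, err := svc.AssociateVPCWithHostedZone(input)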
+func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHostedZoneInput) (req *request.Request, output *AssociateVPCWithHostedZoneOutput) { + op := &request.Operation{ + Name: opAssociateVPCWithHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/associatevpc", + } + + if input == nil { + input = &AssociateVPCWithHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateVPCWithHostedZoneOutput{} + req.Data = output + return +} + +// This action associates a VPC with an hosted zone. +// +// To associate a VPC with an hosted zone, send a POST request to the /Route +// 53 API version/hostedzone/hosted zone ID/associatevpc resource. The request +// body must include a document with a AssociateVPCWithHostedZoneRequest element. +// The response returns the AssociateVPCWithHostedZoneResponse element that +// contains ChangeInfo for you to track the progress of the AssociateVPCWithHostedZoneRequest +// you made. See GetChange operation for how to track the progress of your change. +func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (*AssociateVPCWithHostedZoneOutput, error) { + req, out := c.AssociateVPCWithHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opChangeResourceRecordSets = "ChangeResourceRecordSets" + +// ChangeResourceRecordSetsRequest generates a request for the ChangeResourceRecordSets operation. +func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *request.Request, output *ChangeResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opChangeResourceRecordSets, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset/", + } + + if input == nil { + input = &ChangeResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeResourceRecordSetsOutput{} + req.Data = output + return +} + +// Use this action to create or change your authoritative DNS information. To +// use this action, send a POST request to the /Route 53 API version/hostedzone/hosted +// Zone ID/rrset resource. The request body must include a document with a ChangeResourceRecordSetsRequest +// element. +// +// Changes are a list of change items and are considered transactional. For +// more information on transactional changes, also known as change batches, +// see POST ChangeResourceRecordSets (http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) +// in the Amazon Route 53 API Reference. +// +// Due to the nature of transactional changes, you cannot delete the same resource +// record set more than once in a single change batch. If you attempt to delete +// the same change batch more than once, Amazon Route 53 returns an InvalidChangeBatch +// error. In response to a ChangeResourceRecordSets request, your DNS data is +// changed on all Amazon Route 53 DNS servers. Initially, the status of a change +// is PENDING. This means the change has not yet propagated to all the authoritative +// Amazon Route 53 DNS servers. When the change is propagated to all hosts, +// the change returns a status of INSYNC. +// +// Note the following limitations on a ChangeResourceRecordSets request: +// +// A request cannot contain more than 100 Change elements. A request cannot +// contain more than 1000 ResourceRecord elements. The sum of the number of +// characters (including spaces) in all Value elements in a request cannot exceed +// 32,000 characters. 
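// Editor's sketch (shape field names assumed from the generated types defined
// later in this file, not shown in this hunk): a typical UPSERT of an A
// record might look like
//
//	out, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
//		HostedZoneId: aws.String("Z123EXAMPLE"),
//		ChangeBatch: &route53.ChangeBatch{
//			Changes: []*route53.Change{{
//				Action: aws.String("UPSERT"),
//				ResourceRecordSet: &route53.ResourceRecordSet{
//					Name: aws.String("www.example.com."),
//					Type: aws.String("A"),
//					TTL:  aws.Int64(300),
//					ResourceRecords: []*route53.ResourceRecord{
//						{Value: aws.String("192.0.2.1")},
//					},
//				},
//			}},
//		},
//	})
//	// out.ChangeInfo can then be polled via GetChange until the status is INSYNC.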
+func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) (*ChangeResourceRecordSetsOutput, error) { + req, out := c.ChangeResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +const opChangeTagsForResource = "ChangeTagsForResource" + +// ChangeTagsForResourceRequest generates a request for the ChangeTagsForResource operation. +func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *request.Request, output *ChangeTagsForResourceOutput) { + op := &request.Operation{ + Name: opChangeTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ChangeTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (*ChangeTagsForResourceOutput, error) { + req, out := c.ChangeTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateHealthCheck = "CreateHealthCheck" + +// CreateHealthCheckRequest generates a request for the CreateHealthCheck operation. +func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *request.Request, output *CreateHealthCheckOutput) { + op := &request.Operation{ + Name: opCreateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck", + } + + if input == nil { + input = &CreateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHealthCheckOutput{} + req.Data = output + return +} + +// This action creates a new health check. +// +// To create a new health check, send a POST request to the /Route 53 API version/healthcheck +// resource. The request body must include a document with a CreateHealthCheckRequest +// element. The response returns the CreateHealthCheckResponse element that +// contains metadata about the health check. +func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (*CreateHealthCheckOutput, error) { + req, out := c.CreateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opCreateHostedZone = "CreateHostedZone" + +// CreateHostedZoneRequest generates a request for the CreateHostedZone operation. +func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *request.Request, output *CreateHostedZoneOutput) { + op := &request.Operation{ + Name: opCreateHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone", + } + + if input == nil { + input = &CreateHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHostedZoneOutput{} + req.Data = output + return +} + +// This action creates a new hosted zone. +// +// To create a new hosted zone, send a POST request to the /Route 53 API version/hostedzone +// resource. The request body must include a document with a CreateHostedZoneRequest +// element. The response returns the CreateHostedZoneResponse element that contains +// metadata about the hosted zone. +// +// Amazon Route 53 automatically creates a default SOA record and four NS records +// for the zone. The NS records in the hosted zone are the name servers you +// give your registrar to delegate your domain to. For more information about +// SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for +// a Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) +// in the Amazon Route 53 Developer Guide. 
+// +// When you create a zone, its initial status is PENDING. This means that it +// is not yet available on all DNS servers. The status of the zone changes to +// INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS +// servers. +// +// When trying to create a hosted zone using a reusable delegation set, you +// could specify an optional DelegationSetId, and Route53 would assign those +// 4 NS records for the zone, instead of alloting a new one. +func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (*CreateHostedZoneOutput, error) { + req, out := c.CreateHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opCreateReusableDelegationSet = "CreateReusableDelegationSet" + +// CreateReusableDelegationSetRequest generates a request for the CreateReusableDelegationSet operation. +func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *request.Request, output *CreateReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opCreateReusableDelegationSet, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &CreateReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action creates a reusable delegationSet. +// +// To create a new reusable delegationSet, send a POST request to the /Route +// 53 API version/delegationset resource. The request body must include a document +// with a CreateReusableDelegationSetRequest element. The response returns the +// CreateReusableDelegationSetResponse element that contains metadata about +// the delegationSet. +// +// If the optional parameter HostedZoneId is specified, it marks the delegationSet +// associated with that particular hosted zone as reusable. +func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSetInput) (*CreateReusableDelegationSetOutput, error) { + req, out := c.CreateReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicy = "CreateTrafficPolicy" + +// CreateTrafficPolicyRequest generates a request for the CreateTrafficPolicy operation. +func (c *Route53) CreateTrafficPolicyRequest(input *CreateTrafficPolicyInput) (req *request.Request, output *CreateTrafficPolicyOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicy, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy", + } + + if input == nil { + input = &CreateTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyOutput{} + req.Data = output + return +} + +// Creates a traffic policy, which you use to create multiple DNS resource record +// sets for one domain name (such as example.com) or one subdomain name (such +// as www.example.com). +// +// To create a traffic policy, send a POST request to the /Route 53 API version/trafficpolicy +// resource. The request body must include a document with a CreateTrafficPolicyRequest +// element. The response includes the CreateTrafficPolicyResponse element, which +// contains information about the new traffic policy. 
+func (c *Route53) CreateTrafficPolicy(input *CreateTrafficPolicyInput) (*CreateTrafficPolicyOutput, error) { + req, out := c.CreateTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyInstance = "CreateTrafficPolicyInstance" + +// CreateTrafficPolicyInstanceRequest generates a request for the CreateTrafficPolicyInstance operation. +func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyInstanceInput) (req *request.Request, output *CreateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance", + } + + if input == nil { + input = &CreateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Creates resource record sets in a specified hosted zone based on the settings +// in a specified traffic policy version. In addition, CreateTrafficPolicyInstance +// associates the resource record sets with a specified domain name (such as +// example.com) or subdomain name (such as www.example.com). Amazon Route 53 +// responds to DNS queries for the domain or subdomain name by using the resource +// record sets that CreateTrafficPolicyInstance created. +// +// To create a traffic policy instance, send a POST request to the /Route 53 +// API version/trafficpolicyinstance resource. The request body must include +// a document with a CreateTrafficPolicyRequest element. The response returns +// the CreateTrafficPolicyInstanceResponse element, which contains information +// about the traffic policy instance. +func (c *Route53) CreateTrafficPolicyInstance(input *CreateTrafficPolicyInstanceInput) (*CreateTrafficPolicyInstanceOutput, error) { + req, out := c.CreateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyVersion = "CreateTrafficPolicyVersion" + +// CreateTrafficPolicyVersionRequest generates a request for the CreateTrafficPolicyVersion operation. +func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVersionInput) (req *request.Request, output *CreateTrafficPolicyVersionOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}", + } + + if input == nil { + input = &CreateTrafficPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of an existing traffic policy. When you create a new +// version of a traffic policy, you specify the ID of the traffic policy that +// you want to update and a JSON-formatted document that describes the new version. +// +// You use traffic policies to create multiple DNS resource record sets for +// one domain name (such as example.com) or one subdomain name (such as www.example.com). +// +// To create a new version, send a POST request to the /Route 53 API version/trafficpolicy/ +// resource. The request body includes a document with a CreateTrafficPolicyVersionRequest +// element. The response returns the CreateTrafficPolicyVersionResponse element, +// which contains information about the new version of the traffic policy. 
+func (c *Route53) CreateTrafficPolicyVersion(input *CreateTrafficPolicyVersionInput) (*CreateTrafficPolicyVersionOutput, error) { + req, out := c.CreateTrafficPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHealthCheck = "DeleteHealthCheck" + +// DeleteHealthCheckRequest generates a request for the DeleteHealthCheck operation. +func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *request.Request, output *DeleteHealthCheckOutput) { + op := &request.Operation{ + Name: opDeleteHealthCheck, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &DeleteHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHealthCheckOutput{} + req.Data = output + return +} + +// This action deletes a health check. To delete a health check, send a DELETE +// request to the /Route 53 API version/healthcheck/health check ID resource. +// +// You can delete a health check only if there are no resource record sets +// associated with this health check. If resource record sets are associated +// with this health check, you must disassociate them before you can delete +// your health check. If you try to delete a health check that is associated +// with resource record sets, Amazon Route 53 will deny your request with a +// HealthCheckInUse error. For information about disassociating the records +// from your health check, see ChangeResourceRecordSets. +func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (*DeleteHealthCheckOutput, error) { + req, out := c.DeleteHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHostedZone = "DeleteHostedZone" + +// DeleteHostedZoneRequest generates a request for the DeleteHostedZone operation. +func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *request.Request, output *DeleteHostedZoneOutput) { + op := &request.Operation{ + Name: opDeleteHostedZone, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &DeleteHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHostedZoneOutput{} + req.Data = output + return +} + +// This action deletes a hosted zone. To delete a hosted zone, send a DELETE +// request to the /Route 53 API version/hostedzone/hosted zone ID resource. +// +// For more information about deleting a hosted zone, see Deleting a Hosted +// Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// You can delete a hosted zone only if there are no resource record sets +// other than the default SOA record and NS resource record sets. If your hosted +// zone contains other resource record sets, you must delete them before you +// can delete your hosted zone. If you try to delete a hosted zone that contains +// other resource record sets, Amazon Route 53 will deny your request with a +// HostedZoneNotEmpty error. For information about deleting records from your +// hosted zone, see ChangeResourceRecordSets. +func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (*DeleteHostedZoneOutput, error) { + req, out := c.DeleteHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReusableDelegationSet = "DeleteReusableDelegationSet" + +// DeleteReusableDelegationSetRequest generates a request for the DeleteReusableDelegationSet operation. 
+func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *request.Request, output *DeleteReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opDeleteReusableDelegationSet, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &DeleteReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action deletes a reusable delegation set. To delete a reusable delegation +// set, send a DELETE request to the /Route 53 API version/delegationset/delegation +// set ID resource. +// +// You can delete a reusable delegation set only if there are no associated +// hosted zones. If your reusable delegation set contains associated hosted +// zones, you must delete them before you can delete your reusable delegation +// set. If you try to delete a reusable delegation set that contains associated +// hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse +// error. +func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSetInput) (*DeleteReusableDelegationSetOutput, error) { + req, out := c.DeleteReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicy = "DeleteTrafficPolicy" + +// DeleteTrafficPolicyRequest generates a request for the DeleteTrafficPolicy operation. +func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (req *request.Request, output *DeleteTrafficPolicyOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &DeleteTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyOutput{} + req.Data = output + return +} + +// Deletes a traffic policy. To delete a traffic policy, send a DELETE request +// to the /Route 53 API version/trafficpolicy resource. +func (c *Route53) DeleteTrafficPolicy(input *DeleteTrafficPolicyInput) (*DeleteTrafficPolicyOutput, error) { + req, out := c.DeleteTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicyInstance = "DeleteTrafficPolicyInstance" + +// DeleteTrafficPolicyInstanceRequest generates a request for the DeleteTrafficPolicyInstance operation. +func (c *Route53) DeleteTrafficPolicyInstanceRequest(input *DeleteTrafficPolicyInstanceInput) (req *request.Request, output *DeleteTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicyInstance, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &DeleteTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Deletes a traffic policy instance and all of the resource record sets that +// Amazon Route 53 created when you created the instance. +// +// To delete a traffic policy instance, send a DELETE request to the /Route +// 53 API version/trafficpolicy/traffic policy instance ID resource. +// +// When you delete a traffic policy instance, Amazon Route 53 also deletes +// all of the resource record sets that were created when you created the traffic +// policy instance. 
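+//
+// Illustrative caller-side sketch (not part of the generated SDK; the instance
+// ID is hypothetical, and the standard aws, session, and route53 imports are
+// assumed):
+//
+//	svc := route53.New(session.New())
+//	_, err := svc.DeleteTrafficPolicyInstance(&route53.DeleteTrafficPolicyInstanceInput{
+//		Id: aws.String("example-instance-id"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}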
+func (c *Route53) DeleteTrafficPolicyInstance(input *DeleteTrafficPolicyInstanceInput) (*DeleteTrafficPolicyInstanceOutput, error) { + req, out := c.DeleteTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateVPCFromHostedZone = "DisassociateVPCFromHostedZone" + +// DisassociateVPCFromHostedZoneRequest generates a request for the DisassociateVPCFromHostedZone operation. +func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *request.Request, output *DisassociateVPCFromHostedZoneOutput) { + op := &request.Operation{ + Name: opDisassociateVPCFromHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/disassociatevpc", + } + + if input == nil { + input = &DisassociateVPCFromHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateVPCFromHostedZoneOutput{} + req.Data = output + return +} + +// This action disassociates a VPC from an hosted zone. +// +// To disassociate a VPC to a hosted zone, send a POST request to the /Route +// 53 API version/hostedzone/hosted zone ID/disassociatevpc resource. The request +// body must include a document with a DisassociateVPCFromHostedZoneRequest +// element. The response returns the DisassociateVPCFromHostedZoneResponse element +// that contains ChangeInfo for you to track the progress of the DisassociateVPCFromHostedZoneRequest +// you made. See GetChange operation for how to track the progress of your change. +func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHostedZoneInput) (*DisassociateVPCFromHostedZoneOutput, error) { + req, out := c.DisassociateVPCFromHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetChange = "GetChange" + +// GetChangeRequest generates a request for the GetChange operation. +func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request, output *GetChangeOutput) { + op := &request.Operation{ + Name: opGetChange, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/change/{Id}", + } + + if input == nil { + input = &GetChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeOutput{} + req.Data = output + return +} + +// This action returns the current status of a change batch request. The status +// is one of the following values: +// +// - PENDING indicates that the changes in this request have not replicated +// to all Amazon Route 53 DNS servers. This is the initial status of all change +// batch requests. +// +// - INSYNC indicates that the changes have replicated to all Amazon Route +// 53 DNS servers. +func (c *Route53) GetChange(input *GetChangeInput) (*GetChangeOutput, error) { + req, out := c.GetChangeRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeDetails = "GetChangeDetails" + +// GetChangeDetailsRequest generates a request for the GetChangeDetails operation. 
+func (c *Route53) GetChangeDetailsRequest(input *GetChangeDetailsInput) (req *request.Request, output *GetChangeDetailsOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetChangeDetails, has been deprecated") + } + op := &request.Operation{ + Name: opGetChangeDetails, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/changedetails/{Id}", + } + + if input == nil { + input = &GetChangeDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeDetailsOutput{} + req.Data = output + return +} + +// This action returns the status and changes of a change batch request. +func (c *Route53) GetChangeDetails(input *GetChangeDetailsInput) (*GetChangeDetailsOutput, error) { + req, out := c.GetChangeDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCheckerIpRanges = "GetCheckerIpRanges" + +// GetCheckerIpRangesRequest generates a request for the GetCheckerIpRanges operation. +func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req *request.Request, output *GetCheckerIpRangesOutput) { + op := &request.Operation{ + Name: opGetCheckerIpRanges, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/checkeripranges", + } + + if input == nil { + input = &GetCheckerIpRangesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCheckerIpRangesOutput{} + req.Data = output + return +} + +// To retrieve a list of the IP ranges used by Amazon Route 53 health checkers +// to check the health of your resources, send a GET request to the /Route 53 +// API version/checkeripranges resource. You can use these IP addresses to configure +// router and firewall rules to allow health checkers to check the health of +// your resources. +func (c *Route53) GetCheckerIpRanges(input *GetCheckerIpRangesInput) (*GetCheckerIpRangesOutput, error) { + req, out := c.GetCheckerIpRangesRequest(input) + err := req.Send() + return out, err +} + +const opGetGeoLocation = "GetGeoLocation" + +// GetGeoLocationRequest generates a request for the GetGeoLocation operation. +func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *request.Request, output *GetGeoLocationOutput) { + op := &request.Operation{ + Name: opGetGeoLocation, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocation", + } + + if input == nil { + input = &GetGeoLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGeoLocationOutput{} + req.Data = output + return +} + +// To retrieve a single geo location, send a GET request to the /Route 53 API +// version/geolocation resource with one of these options: continentcode | countrycode +// | countrycode and subdivisioncode. +func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (*GetGeoLocationOutput, error) { + req, out := c.GetGeoLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheck = "GetHealthCheck" + +// GetHealthCheckRequest generates a request for the GetHealthCheck operation. +func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *request.Request, output *GetHealthCheckOutput) { + op := &request.Operation{ + Name: opGetHealthCheck, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &GetHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckOutput{} + req.Data = output + return +} + +// To retrieve the health check, send a GET request to the /Route 53 API version/healthcheck/health +// check ID resource. 
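+//
+// Illustrative caller-side sketch (not part of the generated SDK; the health
+// check ID is hypothetical, and the standard aws, session, and route53 imports
+// are assumed):
+//
+//	svc := route53.New(session.New())
+//	out, err := svc.GetHealthCheck(&route53.GetHealthCheckInput{
+//		HealthCheckId: aws.String("11111111-2222-3333-4444-555555555555"),
+//	})
+//	if err == nil {
+//		fmt.Println(out.HealthCheck.HealthCheckConfig)
+//	}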
+func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (*GetHealthCheckOutput, error) { + req, out := c.GetHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckCount = "GetHealthCheckCount" + +// GetHealthCheckCountRequest generates a request for the GetHealthCheckCount operation. +func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *request.Request, output *GetHealthCheckCountOutput) { + op := &request.Operation{ + Name: opGetHealthCheckCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheckcount", + } + + if input == nil { + input = &GetHealthCheckCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your health checks, send a GET request to the +// /Route 53 API version/healthcheckcount resource. +func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (*GetHealthCheckCountOutput, error) { + req, out := c.GetHealthCheckCountRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckLastFailureReason = "GetHealthCheckLastFailureReason" + +// GetHealthCheckLastFailureReasonRequest generates a request for the GetHealthCheckLastFailureReason operation. +func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *request.Request, output *GetHealthCheckLastFailureReasonOutput) { + op := &request.Operation{ + Name: opGetHealthCheckLastFailureReason, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason", + } + + if input == nil { + input = &GetHealthCheckLastFailureReasonInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckLastFailureReasonOutput{} + req.Data = output + return +} + +// If you want to learn why a health check is currently failing or why it failed +// most recently (if at all), you can get the failure reason for the most recent +// failure. Send a GET request to the /Route 53 API version/healthcheck/health +// check ID/lastfailurereason resource. +func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailureReasonInput) (*GetHealthCheckLastFailureReasonOutput, error) { + req, out := c.GetHealthCheckLastFailureReasonRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckStatus = "GetHealthCheckStatus" + +// GetHealthCheckStatusRequest generates a request for the GetHealthCheckStatus operation. +func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *request.Request, output *GetHealthCheckStatusOutput) { + op := &request.Operation{ + Name: opGetHealthCheckStatus, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/status", + } + + if input == nil { + input = &GetHealthCheckStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckStatusOutput{} + req.Data = output + return +} + +// To retrieve the health check status, send a GET request to the /Route 53 +// API version/healthcheck/health check ID/status resource. You can use this +// call to get a health check's current status. +func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (*GetHealthCheckStatusOutput, error) { + req, out := c.GetHealthCheckStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZone = "GetHostedZone" + +// GetHostedZoneRequest generates a request for the GetHostedZone operation. 
+func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *request.Request, output *GetHostedZoneOutput) { + op := &request.Operation{ + Name: opGetHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &GetHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneOutput{} + req.Data = output + return +} + +// To retrieve the delegation set for a hosted zone, send a GET request to the +// /Route 53 API version/hostedzone/hosted zone ID resource. The delegation +// set is the four Amazon Route 53 name servers that were assigned to the hosted +// zone when you created it. +func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (*GetHostedZoneOutput, error) { + req, out := c.GetHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZoneCount = "GetHostedZoneCount" + +// GetHostedZoneCountRequest generates a request for the GetHostedZoneCount operation. +func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *request.Request, output *GetHostedZoneCountOutput) { + op := &request.Operation{ + Name: opGetHostedZoneCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonecount", + } + + if input == nil { + input = &GetHostedZoneCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your hosted zones, send a GET request to the /Route +// 53 API version/hostedzonecount resource. +func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (*GetHostedZoneCountOutput, error) { + req, out := c.GetHostedZoneCountRequest(input) + err := req.Send() + return out, err +} + +const opGetReusableDelegationSet = "GetReusableDelegationSet" + +// GetReusableDelegationSetRequest generates a request for the GetReusableDelegationSet operation. +func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *request.Request, output *GetReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opGetReusableDelegationSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &GetReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetReusableDelegationSetOutput{} + req.Data = output + return +} + +// To retrieve the reusable delegation set, send a GET request to the /Route +// 53 API version/delegationset/delegation set ID resource. +func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) (*GetReusableDelegationSetOutput, error) { + req, out := c.GetReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicy = "GetTrafficPolicy" + +// GetTrafficPolicyRequest generates a request for the GetTrafficPolicy operation. +func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *request.Request, output *GetTrafficPolicyOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &GetTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyOutput{} + req.Data = output + return +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the /Route 53 API version/trafficpolicy resource. 
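+//
+// Illustrative caller-side sketch (not part of the generated SDK; the policy
+// ID and version are hypothetical, and the standard aws, session, and route53
+// imports are assumed):
+//
+//	svc := route53.New(session.New())
+//	out, err := svc.GetTrafficPolicy(&route53.GetTrafficPolicyInput{
+//		Id:      aws.String("example-policy-id"),
+//		Version: aws.Int64(1),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.TrafficPolicy.Document))
+//	}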
+func (c *Route53) GetTrafficPolicy(input *GetTrafficPolicyInput) (*GetTrafficPolicyOutput, error) { + req, out := c.GetTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstance = "GetTrafficPolicyInstance" + +// GetTrafficPolicyInstanceRequest generates a request for the GetTrafficPolicyInstance operation. +func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanceInput) (req *request.Request, output *GetTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstance, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &GetTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Gets information about a specified traffic policy instance. +// +// To get information about the traffic policy instance, send a GET request +// to the /Route 53 API version/trafficpolicyinstance resource. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. For more +// information, see the State response element. +func (c *Route53) GetTrafficPolicyInstance(input *GetTrafficPolicyInstanceInput) (*GetTrafficPolicyInstanceOutput, error) { + req, out := c.GetTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstanceCount = "GetTrafficPolicyInstanceCount" + +// GetTrafficPolicyInstanceCountRequest generates a request for the GetTrafficPolicyInstanceCount operation. +func (c *Route53) GetTrafficPolicyInstanceCountRequest(input *GetTrafficPolicyInstanceCountInput) (req *request.Request, output *GetTrafficPolicyInstanceCountOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstanceCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstancecount", + } + + if input == nil { + input = &GetTrafficPolicyInstanceCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceCountOutput{} + req.Data = output + return +} + +// Gets the number of traffic policy instances that are associated with the +// current AWS account. +// +// To get the number of traffic policy instances, send a GET request to the +// /Route 53 API version/trafficpolicyinstancecount resource. +func (c *Route53) GetTrafficPolicyInstanceCount(input *GetTrafficPolicyInstanceCountInput) (*GetTrafficPolicyInstanceCountOutput, error) { + req, out := c.GetTrafficPolicyInstanceCountRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByHostedZone = "ListChangeBatchesByHostedZone" + +// ListChangeBatchesByHostedZoneRequest generates a request for the ListChangeBatchesByHostedZone operation. 
+func (c *Route53) ListChangeBatchesByHostedZoneRequest(input *ListChangeBatchesByHostedZoneInput) (req *request.Request, output *ListChangeBatchesByHostedZoneOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListChangeBatchesByHostedZone, has been deprecated") + } + op := &request.Operation{ + Name: opListChangeBatchesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/changes", + } + + if input == nil { + input = &ListChangeBatchesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByHostedZoneOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone. +func (c *Route53) ListChangeBatchesByHostedZone(input *ListChangeBatchesByHostedZoneInput) (*ListChangeBatchesByHostedZoneOutput, error) { + req, out := c.ListChangeBatchesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByRRSet = "ListChangeBatchesByRRSet" + +// ListChangeBatchesByRRSetRequest generates a request for the ListChangeBatchesByRRSet operation. +func (c *Route53) ListChangeBatchesByRRSetRequest(input *ListChangeBatchesByRRSetInput) (req *request.Request, output *ListChangeBatchesByRRSetOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListChangeBatchesByRRSet, has been deprecated") + } + op := &request.Operation{ + Name: opListChangeBatchesByRRSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrsChanges", + } + + if input == nil { + input = &ListChangeBatchesByRRSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByRRSetOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone and RRSet. +func (c *Route53) ListChangeBatchesByRRSet(input *ListChangeBatchesByRRSetInput) (*ListChangeBatchesByRRSetOutput, error) { + req, out := c.ListChangeBatchesByRRSetRequest(input) + err := req.Send() + return out, err +} + +const opListGeoLocations = "ListGeoLocations" + +// ListGeoLocationsRequest generates a request for the ListGeoLocations operation. +func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *request.Request, output *ListGeoLocationsOutput) { + op := &request.Operation{ + Name: opListGeoLocations, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocations", + } + + if input == nil { + input = &ListGeoLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGeoLocationsOutput{} + req.Data = output + return +} + +// To retrieve a list of supported geo locations, send a GET request to the +// /Route 53 API version/geolocations resource. The response to this request +// includes a GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails +// child elements. The list is sorted by country code, and then subdivision +// code, followed by continents at the end of the list. +// +// By default, the list of geo locations is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. If the list is truncated, IsTruncated will be set to true and +// a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode +// will be populated. You can pass these as parameters to StartContinentCode, +// StartCountryCode, StartSubdivisionCode to control the geo location that the +// list begins with. 
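+//
+// Illustrative caller-side sketch (not part of the generated SDK; values are
+// examples). Note that MaxItems is a string-typed field in this API:
+//
+//	svc := route53.New(session.New())
+//	out, err := svc.ListGeoLocations(&route53.ListGeoLocationsInput{
+//		MaxItems: aws.String("50"),
+//	})
+//	if err == nil && aws.BoolValue(out.IsTruncated) {
+//		// pass out.NextContinentCode / NextCountryCode / NextSubdivisionCode
+//		// as the Start* parameters of the next call
+//	}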
+func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (*ListGeoLocationsOutput, error) { + req, out := c.ListGeoLocationsRequest(input) + err := req.Send() + return out, err +} + +const opListHealthChecks = "ListHealthChecks" + +// ListHealthChecksRequest generates a request for the ListHealthChecks operation. +func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *request.Request, output *ListHealthChecksOutput) { + op := &request.Operation{ + Name: opListHealthChecks, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHealthChecksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHealthChecksOutput{} + req.Data = output + return +} + +// To retrieve a list of your health checks, send a GET request to the /Route +// 53 API version/healthcheck resource. The response to this request includes +// a HealthChecks element with zero, one, or multiple HealthCheck child elements. +// By default, the list of health checks is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. You can use the Marker parameter to control the health check that +// the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (*ListHealthChecksOutput, error) { + req, out := c.ListHealthChecksRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p *ListHealthChecksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHealthChecksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHealthChecksOutput), lastPage) + }) +} + +const opListHostedZones = "ListHostedZones" + +// ListHostedZonesRequest generates a request for the ListHostedZones operation. +func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *request.Request, output *ListHostedZonesOutput) { + op := &request.Operation{ + Name: opListHostedZones, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHostedZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones, send a GET request to the /Route +// 53 API version/hostedzone resource. The response to this request includes +// a HostedZones element with zero, one, or multiple HostedZone child elements. +// By default, the list of hosted zones is displayed on a single page. You can +// control the length of the page that is displayed by using the MaxItems parameter. +// You can use the Marker parameter to control the hosted zone that the list +// begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. 
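+//
+// Illustrative caller-side sketch (not part of the generated SDK) using the
+// paginated helper defined below, which follows NextMarker automatically; the
+// standard aws, session, and route53 imports are assumed:
+//
+//	svc := route53.New(session.New())
+//	err := svc.ListHostedZonesPages(&route53.ListHostedZonesInput{},
+//		func(page *route53.ListHostedZonesOutput, lastPage bool) bool {
+//			for _, zone := range page.HostedZones {
+//				fmt.Println(aws.StringValue(zone.Name))
+//			}
+//			return true // keep paging
+//		})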
+func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (*ListHostedZonesOutput, error) { + req, out := c.ListHostedZonesRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *ListHostedZonesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHostedZonesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHostedZonesOutput), lastPage) + }) +} + +const opListHostedZonesByName = "ListHostedZonesByName" + +// ListHostedZonesByNameRequest generates a request for the ListHostedZonesByName operation. +func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *request.Request, output *ListHostedZonesByNameOutput) { + op := &request.Operation{ + Name: opListHostedZonesByName, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyname", + } + + if input == nil { + input = &ListHostedZonesByNameInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesByNameOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the /Route 53 API version/hostedzonesbyname resource. The response +// to this request includes a HostedZones element with zero or more HostedZone +// child elements lexicographically ordered by DNS name. By default, the list +// of hosted zones is displayed on a single page. You can control the length +// of the page that is displayed by using the MaxItems parameter. You can use +// the DNSName and HostedZoneId parameters to control the hosted zone that the +// list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (*ListHostedZonesByNameOutput, error) { + req, out := c.ListHostedZonesByNameRequest(input) + err := req.Send() + return out, err +} + +const opListResourceRecordSets = "ListResourceRecordSets" + +// ListResourceRecordSetsRequest generates a request for the ListResourceRecordSets operation. +func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *request.Request, output *ListResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opListResourceRecordSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset", + Paginator: &request.Paginator{ + InputTokens: []string{"StartRecordName", "StartRecordType", "StartRecordIdentifier"}, + OutputTokens: []string{"NextRecordName", "NextRecordType", "NextRecordIdentifier"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListResourceRecordSetsOutput{} + req.Data = output + return +} + +// Imagine all the resource record sets in a zone listed out in front of you. +// Imagine them sorted lexicographically first by DNS name (with the labels +// reversed, like "com.amazon.www" for example), and secondarily, lexicographically +// by record type. 
This operation retrieves at most MaxItems resource record +// sets from this list, in order, starting at a position specified by the Name +// and Type arguments: +// +// If both Name and Type are omitted, this means start the results at the +// first RRSET in the HostedZone. If Name is specified but Type is omitted, +// this means start the results at the first RRSET in the list whose name is +// greater than or equal to Name. If both Name and Type are specified, this +// means start the results at the first RRSET in the list whose name is greater +// than or equal to Name and whose type is greater than or equal to Type. It +// is an error to specify the Type but not the Name. Use ListResourceRecordSets +// to retrieve a single known record set by specifying the record set's name +// and type, and setting MaxItems = 1 +// +// To retrieve all the records in a HostedZone, first pause any processes making +// calls to ChangeResourceRecordSets. Initially call ListResourceRecordSets +// without a Name and Type to get the first page of record sets. For subsequent +// calls, set Name and Type to the NextName and NextType values returned by +// the previous response. +// +// In the presence of concurrent ChangeResourceRecordSets calls, there is no +// consistency of results across calls to ListResourceRecordSets. The only way +// to get a consistent multi-page snapshot of all RRSETs in a zone is to stop +// making changes while pagination is in progress. +// +// However, the results from ListResourceRecordSets are consistent within a +// page. If MakeChange calls are taking place concurrently, the result of each +// one will either be completely visible in your results or not at all. You +// will not see partial changes, or changes that do not ultimately succeed. +// (This follows from the fact that MakeChange is atomic) +// +// The results from ListResourceRecordSets are strongly consistent with ChangeResourceRecordSets. +// To be precise, if a single process makes a call to ChangeResourceRecordSets +// and receives a successful response, the effects of that change will be visible +// in a subsequent call to ListResourceRecordSets by that process. +func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (*ListResourceRecordSetsOutput, error) { + req, out := c.ListResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput, fn func(p *ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListResourceRecordSetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListResourceRecordSetsOutput), lastPage) + }) +} + +const opListReusableDelegationSets = "ListReusableDelegationSets" + +// ListReusableDelegationSetsRequest generates a request for the ListReusableDelegationSets operation. 
+func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *request.Request, output *ListReusableDelegationSetsOutput) { + op := &request.Operation{ + Name: opListReusableDelegationSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &ListReusableDelegationSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReusableDelegationSetsOutput{} + req.Data = output + return +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the /Route 53 API version/delegationset resource. The response to this request +// includes a DelegationSets element with zero, one, or multiple DelegationSet +// child elements. By default, the list of delegation sets is displayed on a +// single page. You can control the length of the page that is displayed by +// using the MaxItems parameter. You can use the Marker parameter to control +// the delegation set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsInput) (*ListReusableDelegationSetsOutput, error) { + req, out := c.ListReusableDelegationSetsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResources = "ListTagsForResources" + +// ListTagsForResourcesRequest generates a request for the ListTagsForResources operation. +func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *request.Request, output *ListTagsForResourcesOutput) { + op := &request.Operation{ + Name: opListTagsForResources, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}", + } + + if input == nil { + input = &ListTagsForResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourcesOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (*ListTagsForResourcesOutput, error) { + req, out := c.ListTagsForResourcesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicies = "ListTrafficPolicies" + +// ListTrafficPoliciesRequest generates a request for the ListTrafficPolicies operation. 
+func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (req *request.Request, output *ListTrafficPoliciesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicies, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies", + } + + if input == nil { + input = &ListTrafficPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPoliciesOutput{} + req.Data = output + return +} + +// Gets information about the latest version for every traffic policy that is +// associated with the current AWS account. To get the information, send a GET +// request to the /Route 53 API version/trafficpolicy resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policies associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// that is associated with the current account. +// +// TrafficPolicyIdMarker If IsTruncated is true, TrafficPolicyIdMarker is the +// ID of the first traffic policy in the next group of MaxItems traffic policies. +// If you want to list more traffic policies, make another call to ListTrafficPolicies, +// and specify the value of the TrafficPolicyIdMarker element from the response +// in the TrafficPolicyIdMarker request parameter. +// +// If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from +// the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +func (c *Route53) ListTrafficPolicies(input *ListTrafficPoliciesInput) (*ListTrafficPoliciesOutput, error) { + req, out := c.ListTrafficPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstances = "ListTrafficPolicyInstances" + +// ListTrafficPolicyInstancesRequest generates a request for the ListTrafficPolicyInstances operation. +func (c *Route53) ListTrafficPolicyInstancesRequest(input *ListTrafficPolicyInstancesInput) (req *request.Request, output *ListTrafficPolicyInstancesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstances, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances", + } + + if input == nil { + input = &ListTrafficPolicyInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// the current AWS account. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that are associated +// with the current AWS account, send a GET request to the /Route 53 API version/trafficpolicyinstance +// resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. 
+// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these three values in the response represent the +// first traffic policy instance in the next group of MaxItems traffic policy +// instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstances(input *ListTrafficPolicyInstancesInput) (*ListTrafficPolicyInstancesOutput, error) { + req, out := c.ListTrafficPolicyInstancesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByHostedZone = "ListTrafficPolicyInstancesByHostedZone" + +// ListTrafficPolicyInstancesByHostedZoneRequest generates a request for the ListTrafficPolicyInstancesByHostedZone operation. +func (c *Route53) ListTrafficPolicyInstancesByHostedZoneRequest(input *ListTrafficPolicyInstancesByHostedZoneInput) (req *request.Request, output *ListTrafficPolicyInstancesByHostedZoneOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstancesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances/hostedzone", + } + + if input == nil { + input = &ListTrafficPolicyInstancesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesByHostedZoneOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created in a +// specified hosted zone. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that you created +// in a specified hosted zone, send a GET request to the /Route 53 API version/trafficpolicyinstance +// resource and include the ID of the hosted zone. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes four values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. 
+// +// TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker If IsTruncated +// is true, these two values in the response represent the first traffic policy +// instance in the next group of MaxItems traffic policy instances. To list +// more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstancesByHostedZone(input *ListTrafficPolicyInstancesByHostedZoneInput) (*ListTrafficPolicyInstancesByHostedZoneOutput, error) { + req, out := c.ListTrafficPolicyInstancesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByPolicy = "ListTrafficPolicyInstancesByPolicy" + +// ListTrafficPolicyInstancesByPolicyRequest generates a request for the ListTrafficPolicyInstancesByPolicy operation. +func (c *Route53) ListTrafficPolicyInstancesByPolicyRequest(input *ListTrafficPolicyInstancesByPolicyInput) (req *request.Request, output *ListTrafficPolicyInstancesByPolicyOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstancesByPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances/trafficpolicy", + } + + if input == nil { + input = &ListTrafficPolicyInstancesByPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesByPolicyOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// a specify traffic policy version. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. For more +// information, see the State response element. To get information about the +// traffic policy instances that you created by using a specify traffic policy +// version, send a GET request to the /Route 53 API version/trafficpolicyinstance +// resource and include the ID and version of the traffic policy. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the specified traffic policy. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the specified traffic policy. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these values in the response represent the first +// traffic policy instance in the next group of MaxItems traffic policy instances. +// To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. 
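+//
+// Illustrative caller-side sketch (not part of the generated SDK; the policy
+// ID and version are hypothetical, and the standard aws, session, and route53
+// imports are assumed):
+//
+//	svc := route53.New(session.New())
+//	out, err := svc.ListTrafficPolicyInstancesByPolicy(&route53.ListTrafficPolicyInstancesByPolicyInput{
+//		TrafficPolicyId:      aws.String("example-policy-id"),
+//		TrafficPolicyVersion: aws.Int64(1),
+//	})
+//	if err == nil {
+//		fmt.Println(len(out.TrafficPolicyInstances))
+//	}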
+func (c *Route53) ListTrafficPolicyInstancesByPolicy(input *ListTrafficPolicyInstancesByPolicyInput) (*ListTrafficPolicyInstancesByPolicyOutput, error) { + req, out := c.ListTrafficPolicyInstancesByPolicyRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyVersions = "ListTrafficPolicyVersions" + +// ListTrafficPolicyVersionsRequest generates a request for the ListTrafficPolicyVersions operation. +func (c *Route53) ListTrafficPolicyVersionsRequest(input *ListTrafficPolicyVersionsInput) (req *request.Request, output *ListTrafficPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyVersions, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies/{Id}/versions", + } + + if input == nil { + input = &ListTrafficPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyVersionsOutput{} + req.Data = output + return +} + +// Gets information about all of the versions for a specified traffic policy. +// ListTrafficPolicyVersions lists only versions that have not been deleted. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitemsmaxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy versions associated with the specified traffic policy. +// +// If IsTruncated is false, this response includes the last traffic policy +// version that is associated with the specified traffic policy. +// +// TrafficPolicyVersionMarker The ID of the next traffic policy version that +// is associated with the current AWS account. If you want to list more traffic +// policies, make another call to ListTrafficPolicyVersions, and specify the +// value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker +// request parameter. +// +// If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker +// element from the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +func (c *Route53) ListTrafficPolicyVersions(input *ListTrafficPolicyVersionsInput) (*ListTrafficPolicyVersionsOutput, error) { + req, out := c.ListTrafficPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHealthCheck = "UpdateHealthCheck" + +// UpdateHealthCheckRequest generates a request for the UpdateHealthCheck operation. +func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *request.Request, output *UpdateHealthCheckOutput) { + op := &request.Operation{ + Name: opUpdateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &UpdateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHealthCheckOutput{} + req.Data = output + return +} + +// This action updates an existing health check. +// +// To update a health check, send a POST request to the /Route 53 API version/healthcheck/health +// check ID resource. The request body must include a document with an UpdateHealthCheckRequest +// element. The response returns an UpdateHealthCheckResponse element, which +// contains metadata about the health check. 
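+//
+// Illustrative caller-side sketch (not part of the generated SDK; the health
+// check ID and the particular field being changed are hypothetical examples):
+//
+//	svc := route53.New(session.New())
+//	out, err := svc.UpdateHealthCheck(&route53.UpdateHealthCheckInput{
+//		HealthCheckId:    aws.String("11111111-2222-3333-4444-555555555555"),
+//		FailureThreshold: aws.Int64(3),
+//	})
+//	if err == nil {
+//		_ = out.HealthCheck // the updated health check
+//	}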
+func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (*UpdateHealthCheckOutput, error) { + req, out := c.UpdateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHostedZoneComment = "UpdateHostedZoneComment" + +// UpdateHostedZoneCommentRequest generates a request for the UpdateHostedZoneComment operation. +func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *request.Request, output *UpdateHostedZoneCommentOutput) { + op := &request.Operation{ + Name: opUpdateHostedZoneComment, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &UpdateHostedZoneCommentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHostedZoneCommentOutput{} + req.Data = output + return +} + +// To update the hosted zone comment, send a POST request to the /Route 53 API +// version/hostedzone/hosted zone ID resource. The request body must include +// a document with a UpdateHostedZoneCommentRequest element. The response to +// this request includes the modified HostedZone element. +// +// The comment can have a maximum length of 256 characters. +func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) (*UpdateHostedZoneCommentOutput, error) { + req, out := c.UpdateHostedZoneCommentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrafficPolicyComment = "UpdateTrafficPolicyComment" + +// UpdateTrafficPolicyCommentRequest generates a request for the UpdateTrafficPolicyComment operation. +func (c *Route53) UpdateTrafficPolicyCommentRequest(input *UpdateTrafficPolicyCommentInput) (req *request.Request, output *UpdateTrafficPolicyCommentOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyComment, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &UpdateTrafficPolicyCommentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyCommentOutput{} + req.Data = output + return +} + +// Updates the comment for a specified traffic policy version. +// +// To update the comment, send a POST request to the /Route 53 API version/trafficpolicy/ +// resource. +// +// The request body must include a document with an UpdateTrafficPolicyCommentRequest +// element. +func (c *Route53) UpdateTrafficPolicyComment(input *UpdateTrafficPolicyCommentInput) (*UpdateTrafficPolicyCommentOutput, error) { + req, out := c.UpdateTrafficPolicyCommentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrafficPolicyInstance = "UpdateTrafficPolicyInstance" + +// UpdateTrafficPolicyInstanceRequest generates a request for the UpdateTrafficPolicyInstance operation. +func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyInstanceInput) (req *request.Request, output *UpdateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &UpdateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Updates the resource record sets in a specified hosted zone that were created +// based on the settings in a specified traffic policy version. 
+// +// The DNS type of the resource record sets that you're updating must match +// the DNS type in the JSON document that is associated with the traffic policy +// version that you're using to update the traffic policy instance. When you +// update a traffic policy instance, Amazon Route 53 continues to respond to +// DNS queries for the root resource record set name (such as example.com) while +// it replaces one group of resource record sets with another. Amazon Route +// 53 performs the following operations: +// +// Amazon Route 53 creates a new group of resource record sets based on the +// specified traffic policy. This is true regardless of how substantial the +// differences are between the existing resource record sets and the new resource +// record sets. When all of the new resource record sets have been created, +// Amazon Route 53 starts to respond to DNS queries for the root resource record +// set name (such as example.com) by using the new resource record sets. Amazon +// Route 53 deletes the old group of resource record sets that are associated +// with the root resource record set name. To update a traffic policy instance, +// send a POST request to the /Route 53 API version/trafficpolicyinstance/traffic +// policy ID resource. The request body must include a document with an UpdateTrafficPolicyInstanceRequest +// element. +func (c *Route53) UpdateTrafficPolicyInstance(input *UpdateTrafficPolicyInstanceInput) (*UpdateTrafficPolicyInstanceOutput, error) { + req, out := c.UpdateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +// Alias resource record sets only: Information about the CloudFront distribution, +// ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set +// to which you are routing traffic. +// +// If you're creating resource record sets for a private hosted zone, note +// the following: +// +// You can create alias resource record sets only for Amazon Route 53 resource +// record sets in the same private hosted zone. Creating alias resource record +// sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets +// is not supported. You can't create alias resource record sets for failover, +// geolocation, or latency resource record sets in a private hosted zone. For +// more information and an example, see Example: Creating Alias Resource Record +// Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) +// in the Amazon Route 53 API Reference. +type AliasTarget struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: The external DNS name associated with the + // AWS Resource. The value that you specify depends on where you want to route + // queries: + // + // A CloudFront distribution: Specify the domain name that CloudFront assigned + // when you created your distribution. Your CloudFront distribution must include + // an alternate domain name that matches the name of the resource record set. + // For example, if the name of the resource record set is acme.example.com, + // your CloudFront distribution must include acme.example.com as one of the + // alternate domain names. For more information, see Using Alternate Domain + // Names (CNAMEs) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) + // in the Amazon CloudFront Developer Guide. An ELB load balancer: Specify the + // DNS name associated with the load balancer. 
You can get the DNS name by using + // the AWS Management Console, the ELB API, or the AWS CLI. Use the same method + // to get values for HostedZoneId and DNSName. If you get one value from the + // console and the other value from the API or the CLI, creating the resource + // record set will fail. An Elastic Beanstalk environment: Specify the CNAME + // attribute for the environment. (The environment must have a regionalized + // domain name.) An Amazon S3 bucket that is configured as a static website: + // Specify the domain name of the Amazon S3 website endpoint in which you created + // the bucket; for example, s3-website-us-east-1.amazonaws.com. For more information + // about valid values, see the table Amazon Simple Storage Service (S3) Website + // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. For more information about + // using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon + // S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) in + // the Amazon Simple Storage Service Developer Guide. Another Amazon Route 53 + // resource record set: Specify the value of the Name element for a resource + // record set in the current hosted zone. For more information and an example, + // see Example: Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) + // in the Amazon Route 53 API Reference. + DNSName *string `type:"string" required:"true"` + + // Alias resource record sets only: If you set the value of EvaluateTargetHealth + // to true for the resource record set or sets in an alias, weighted alias, + // latency alias, or failover alias resource record set, and if you specify + // a value for HealthCheckId for every resource record set that is referenced + // by these alias resource record sets, the alias resource record sets inherit + // the health of the referenced resource record sets. + // + // In this configuration, when Amazon Route 53 receives a DNS query for an + // alias resource record set: + // + // Amazon Route 53 looks at the resource record sets that are referenced by + // the alias resource record sets to determine which health checks they're using. + // Amazon Route 53 checks the current status of each health check. (Amazon Route + // 53 periodically checks the health of the endpoint that is specified in a + // health check; it doesn't perform the health check when the DNS query arrives.) + // Based on the status of the health checks, Amazon Route 53 determines which + // resource record sets are healthy. Unhealthy resource record sets are immediately + // removed from consideration. In addition, if all of the resource record sets + // that are referenced by an alias resource record set are unhealthy, that alias + // resource record set also is immediately removed from consideration. Based + // on the configuration of the alias resource record sets (weighted alias or + // latency alias, for example) and the configuration of the resource record + // sets that they reference, Amazon Route 53 chooses a resource record set from + // the healthy resource record sets, and responds to the query. Note the following: + // + // You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront + // distribution. 
If the AWS resource that you specify in AliasTarget is a resource + // record set or a group of resource record sets (for example, a group of weighted + // resource record sets), but it is not another alias resource record set, we + // recommend that you associate a health check with all of the resource record + // sets in the alias target. For more information, see What Happens When You + // Omit Health Checks? (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) + // in the Amazon Route 53 Developer Guide. If you specify an ELB load balancer + // in AliasTarget, Elastic Load Balancing routes queries only to the healthy + // Amazon EC2 instances that are registered with the load balancer. If no Amazon + // EC2 instances are healthy or if the load balancer itself is unhealthy, and + // if EvaluateTargetHealth is true for the corresponding alias resource record + // set, Amazon Route 53 routes queries to other resources. When you create a + // load balancer, you configure settings for Elastic Load Balancing health checks; + // they're not Amazon Route 53 health checks, but they perform a similar function. + // Do not create Amazon Route 53 health checks for the Amazon EC2 instances + // that you register with an ELB load balancer. For more information, see How + // Health Checks Work in More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html) + // in the Amazon Route 53 Developer Guide. We recommend that you set EvaluateTargetHealth + // to true only when you have enough idle capacity to handle the failure of + // one or more endpoints. + // + // For more information and examples, see Amazon Route 53 Health Checks and + // DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. + EvaluateTargetHealth *bool `type:"boolean" required:"true"` + + // Alias resource record sets only: The value you use depends on where you want + // to route queries: + // + // A CloudFront distribution: Specify Z2FDTNDATAQYW2. An ELB load balancer: + // Specify the value of the hosted zone ID for the load balancer. You can get + // the hosted zone ID by using the AWS Management Console, the ELB API, or the + // AWS CLI. Use the same method to get values for HostedZoneId and DNSName. + // If you get one value from the console and the other value from the API or + // the CLI, creating the resource record set will fail. An Amazon S3 bucket + // that is configured as a static website: Specify the hosted zone ID for the + // Amazon S3 website endpoint in which you created the bucket. For more information + // about valid values, see the table Amazon Simple Storage Service (S3) Website + // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. Another Amazon Route 53 resource + // record set in your hosted zone: Specify the hosted zone ID of your hosted + // zone. (An alias resource record set cannot reference a resource record set + // in a different hosted zone.) For more information and an example, see Example: + // Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) + // in the Amazon Route 53 API Reference. 
+ HostedZoneId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AliasTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasTarget) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to associate a +// VPC with a hosted zone. +type AssociateVPCWithHostedZoneInput struct { + _ struct{} `locationName:"AssociateVPCWithHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Optional: Any comments you want to include about an AssociateVPCWithHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to associate your VPC with. + // + // Note that you cannot associate a VPC with a hosted zone that doesn't have + // an existing VPC association. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. + VPC *VPC `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssociateVPCWithHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVPCWithHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the request. +type AssociateVPCWithHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your AssociateVPCWithHostedZoneRequest. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssociateVPCWithHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVPCWithHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information for each change in a change +// batch request. +type Change struct { + _ struct{} `type:"structure"` + + // The action to perform: + // + // CREATE: Creates a resource record set that has the specified values. DELETE: + // Deletes an existing resource record set that has the specified values for + // Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover + // resource record sets), and TTL (except alias resource record sets, for which + // the TTL is determined by the AWS resource that you're routing DNS queries + // to). UPSERT: If a resource record set does not already exist, Amazon Route + // 53 creates it. If a resource record set does exist, Amazon Route 53 updates + // it with the values in the request. Amazon Route 53 can update an existing + // resource record set only when all of the following values match: Name, Type, + // and SetIdentifier (for weighted, latency, geolocation, and failover resource + // record sets). + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Information about the resource record set to create or delete.
+ ResourceRecordSet *ResourceRecordSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Change) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Change) GoString() string { + return s.String() +} + +// A complex type that contains an optional comment and the changes that you +// want to make with a change batch request. +type ChangeBatch struct { + _ struct{} `type:"structure"` + + // A complex type that contains one Change element for each resource record + // set that you want to create or delete. + Changes []*Change `locationNameList:"Change" min:"1" type:"list" required:"true"` + + // Optional: Any comments you want to include about a change batch request. + Comment *string `type:"string"` +} + +// String returns the string representation +func (s ChangeBatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeBatch) GoString() string { + return s.String() +} + +// A complex type that lists the changes and information for a ChangeBatch. +type ChangeBatchRecord struct { + _ struct{} `deprecated:"true" type:"structure"` + + // A list of changes made in the ChangeBatch. + Changes []*Change `locationNameList:"Change" min:"1" type:"list"` + + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + Id *string `type:"string" required:"true"` + + // The current state of the request. PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true" enum:"ChangeStatus"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The AWS account ID attached to the changes. + Submitter *string `type:"string"` +} + +// String returns the string representation +func (s ChangeBatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeBatchRecord) GoString() string { + return s.String() +} + +// A complex type that describes change information about changes made to your +// hosted zone. +// +// This element contains an ID that you use when performing a GetChange action +// to get detailed information about the change. +type ChangeInfo struct { + _ struct{} `type:"structure"` + + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + Id *string `type:"string" required:"true"` + + // The current state of the request. 
PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true" enum:"ChangeStatus"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s ChangeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeInfo) GoString() string { + return s.String() +} + +// A complex type that contains a change batch. +type ChangeResourceRecordSetsInput struct { + _ struct{} `locationName:"ChangeResourceRecordSetsRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains an optional comment and the Changes element. + ChangeBatch *ChangeBatch `type:"structure" required:"true"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to change. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsInput) GoString() string { + return s.String() +} + +// A complex type containing the response for the request. +type ChangeResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about changes made to your hosted + // zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request to add, change, or +// delete the tags that are associated with a resource. +type ChangeTagsForResourceInput struct { + _ struct{} `locationName:"ChangeTagsForResourceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains a list of Tag elements. Each Tag element identifies + // a tag that you want to add or update for the specified resource. + AddTags []*Tag `locationNameList:"Tag" min:"1" type:"list"` + + // A list of Tag keys that you want to remove from the specified resource. + RemoveTagKeys []*string `locationNameList:"Key" min:"1" type:"list"` + + // The ID of the resource for which you want to add, change, or delete tags. + ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. 
+ ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ChangeTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceInput) GoString() string { + return s.String() +} + +// Empty response for the request. +type ChangeTagsForResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to create a health +// check. +type CreateHealthCheckInput struct { + _ struct{} `locationName:"CreateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateHealthCheck + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a health + // check. CallerReference can be any unique string; you might choose to use + // a string that identifies your project. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHealthCheckInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the new health check. +type CreateHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The unique URL representing the new health check. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to create a hosted +// zone. +type CreateHostedZoneInput struct { + _ struct{} `locationName:"CreateHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateHostedZone + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a hosted + // zone. CallerReference can be any unique string; you might choose to use a + // string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes.
+ CallerReference *string `min:"1" type:"string" required:"true"` + + // The delegation set id of the reusable delegation set whose NS records you + // want to assign to the new hosted zone. + DelegationSetId *string `type:"string"` + + // A complex type that contains an optional comment about your hosted zone. + HostedZoneConfig *HostedZoneConfig `type:"structure"` + + // The name of the domain. This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that + // the domain name is fully qualified. This means that Amazon Route 53 treats + // www.example.com (without a trailing dot) and www.example.com. (with a trailing + // dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. By providing + // this parameter, your newly created hosted zone cannot be resolved anywhere other + // than the given VPC. + VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the new hosted zone. +type CreateHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the request to create a hosted + // zone. This includes an ID that you use when you call the GetChange action + // to get the current status of the change request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // A complex type that contains identifying information about the hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // The unique URL representing the new hosted zone. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneOutput) GoString() string { + return s.String() +} + +type CreateReusableDelegationSetInput struct { + _ struct{} `locationName:"CreateReusableDelegationSetRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateReusableDelegationSet + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a reusable + // delegation set. CallerReference can be any unique string; you might choose + // to use a string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // The ID of the hosted zone whose delegation set you want to mark as reusable.
+ // It is an optional parameter. + HostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetInput) GoString() string { + return s.String() +} + +type CreateReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // The unique URL representing the new reusable delegation set. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy that you +// want to create. +type CreateTrafficPolicyInput struct { + _ struct{} `locationName:"CreateTrafficPolicyRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the traffic policy. + Comment *string `type:"string"` + + // The definition of this traffic policy in JSON format. + Document *string `type:"string" required:"true"` + + // The name of the traffic policy. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to create based on a specified traffic policy. +type CreateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"CreateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the hosted zone in which you want Amazon Route 53 to create resource + // record sets by using the configuration in a traffic policy. + HostedZoneId *string `type:"string" required:"true"` + + // The domain name (such as example.com) or subdomain name (such as www.example.com) + // for which Amazon Route 53 responds to DNS queries by using the resource record + // sets that Amazon Route 53 creates for this traffic policy instance. + Name *string `type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the resource record + // sets that it creates in the specified hosted zone. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want to use to create resource record + // sets in the specified hosted zone. + TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want to use to create resource + // record sets in the specified hosted zone.
+ TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicyInstance +// request. +type CreateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A unique URL that represents a new traffic policy instance. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicy +// request. +type CreateTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to create a new version. +type CreateTrafficPolicyVersionInput struct { + _ struct{} `locationName:"CreateTrafficPolicyVersionRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the new traffic policy version. + Comment *string `type:"string"` + + // The definition of a new traffic policy version, in JSON format. You must + // specify the full definition of the new traffic policy. You cannot specify + // just the differences between the new version and a previous version. + Document *string `type:"string" required:"true"` + + // The ID of the traffic policy for which you want to create a new version. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicyVersion +// request. +type CreateTrafficPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new version of the traffic + // policy. 
+ TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionOutput) GoString() string { + return s.String() +} + +// A complex type that contains name server information. +type DelegationSet struct { + _ struct{} `type:"structure"` + + CallerReference *string `min:"1" type:"string"` + + Id *string `type:"string"` + + // A complex type that contains the authoritative name servers for the hosted + // zone. Use the method provided by your domain registrar to add an NS record + // to your domain for each NameServer that is assigned to your hosted zone. + NameServers []*string `locationNameList:"NameServer" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DelegationSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DelegationSet) GoString() string { + return s.String() +} + +// A complex type containing the request information for delete health check. +type DeleteHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to delete. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHealthCheckInput) GoString() string { + return s.String() +} + +// Empty response for the request. +type DeleteHealthCheckOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the hosted zone that you want +// to delete. +type DeleteHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the request. +type DeleteHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your delete request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type containing the information for the delete request. +type DeleteReusableDelegationSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the reusable delegation set you want to delete. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Empty response for the request. +type DeleteReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A request to delete a specified traffic policy version. +type DeleteTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to delete. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy instance +// that you want to delete. +type DeleteTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to delete. + // + // When you delete a traffic policy instance, Amazon Route 53 also deletes + // all of the resource record sets that were created when you created the traffic + // policy instance. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// An empty element. +type DeleteTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// An empty element. +type DeleteTrafficPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to disassociate +// a VPC from a hosted zone. +type DisassociateVPCFromHostedZoneInput struct { + _ struct{} `locationName:"DisassociateVPCFromHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to disassociate your VPC from.
+ // + // Note that you cannot disassociate the last VPC from a hosted zone. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be disassociated from. + VPC *VPC `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the request. +type DisassociateVPCFromHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your DisassociateVPCFromHostedZoneRequest. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about a geo location. +type GeoLocation struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocation) GoString() string { + return s.String() +} + +// A complex type that contains information about a GeoLocation. +type GeoLocationDetails struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + ContinentCode *string `min:"2" type:"string"` + + // The name of the continent. This element is only present if ContinentCode + // is also present. + ContinentName *string `min:"1" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The name of the country. This element is only present if CountryCode is also + // present. + CountryName *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). 
A subdivision + // code is only valid with the appropriate country code. + SubdivisionCode *string `min:"1" type:"string"` + + // The name of the subdivision. This element is only present if SubdivisionCode + // is also present. + SubdivisionName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocationDetails) GoString() string { + return s.String() +} + +// The input for a GetChangeDetails request. +type GetChangeDetailsInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsInput) GoString() string { + return s.String() +} + +// A complex type that contains the ChangeBatchRecord element. +type GetChangeDetailsOutput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the contained + // changes. + ChangeBatchRecord *ChangeBatchRecord `deprecated:"true" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsOutput) GoString() string { + return s.String() +} + +// The input for a GetChange request. +type GetChangeInput struct { + _ struct{} `type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeInput) GoString() string { + return s.String() +} + +// A complex type that contains the ChangeInfo element. +type GetChangeOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the date and + // time of the request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeOutput) GoString() string { + return s.String() +} + +// Empty request. +type GetCheckerIpRangesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCheckerIpRangesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCheckerIpRangesInput) GoString() string { + return s.String() +} + +// A complex type that contains the CheckerIpRanges element. 
+type GetCheckerIpRangesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains sorted list of IP ranges in CIDR format for + // Amazon Route 53 health checkers. + CheckerIpRanges []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetCheckerIpRangesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCheckerIpRangesOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a geo location. +type GetGeoLocationInput struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `location:"querystring" locationName:"continentcode" min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `location:"querystring" locationName:"countrycode" min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGeoLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified geo location. +type GetGeoLocationOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified geo location. + GeoLocationDetails *GeoLocationDetails `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetGeoLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your health checks, send a GET request to the +// /Route 53 API version/healthcheckcount resource. +type GetHealthCheckCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHealthCheckCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of health checks associated with the +// current AWS account. +type GetHealthCheckCountOutput struct { + _ struct{} `type:"structure"` + + // The number of health checks associated with the current AWS account. 
+ HealthCheckCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a health +// check. +type GetHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to retrieve. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get the most +// recent failure reason for a health check. +type GetHealthCheckLastFailureReasonInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check for which you want to retrieve the reason for + // the most recent failure. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the most recent failure for +// the specified health check. +type GetHealthCheckLastFailureReasonOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified health check. +type GetHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get health +// check status for a health check. +type GetHealthCheckStatusInput struct { + _ struct{} `type:"structure"` + + // If you want Amazon Route 53 to return this resource record set in response + // to a DNS query only when a health check is passing, include the HealthCheckId + // element and specify the ID of the applicable health check. + // + // Amazon Route 53 determines whether a resource record set is healthy by periodically + // sending a request to the endpoint that is specified in the health check. + // If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint + // is healthy. 
If the endpoint returns an HTTP status code of 400 or greater, + // or if the endpoint doesn't respond for a certain amount of time, Amazon Route + // 53 considers the endpoint unhealthy and also considers the resource record + // set unhealthy. + // + // The HealthCheckId element is only useful when Amazon Route 53 is choosing + // between two or more resource record sets to respond to a DNS query, and you + // want Amazon Route 53 to base the choice in part on the status of a health + // check. Configuring health checks only makes sense in the following configurations: + // + // You're checking the health of the resource record sets in a weighted, latency, + // geolocation, or failover resource record set, and you specify health check + // IDs for all of the resource record sets. If the health check for one resource + // record set specifies an endpoint that is not healthy, Amazon Route 53 stops + // responding to queries using the value for that resource record set. You set + // EvaluateTargetHealth to true for the resource record sets in an alias, weighted + // alias, latency alias, geolocation alias, or failover alias resource record + // set, and you specify health check IDs for all of the resource record sets + // that are referenced by the alias resource record sets. For more information + // about this configuration, see EvaluateTargetHealth. + // + // Amazon Route 53 doesn't check the health of the endpoint specified in the + // resource record set, for example, the endpoint specified by the IP address + // in the Value element. When you add a HealthCheckId element to a resource + // record set, Amazon Route 53 checks the health of the endpoint that you specified + // in the health check. + // + // For geolocation resource record sets, if an endpoint is unhealthy, Amazon + // Route 53 looks for a resource record set for the larger, associated geographic + // region. For example, suppose you have resource record sets for a state in + // the United States, for the United States, for North America, and for all + // locations. If the endpoint for the state resource record set is unhealthy, + // Amazon Route 53 checks the resource record sets for the United States, for + // North America, and for all locations (a resource record set for which the + // value of CountryCode is *), in that order, until it finds a resource record + // set for which the endpoint is healthy. + // + // If your health checks specify the endpoint only by domain name, we recommend + // that you create a separate health check for each endpoint. For example, create + // a health check for each HTTP server that is serving content for www.example.com. + // For the value of FullyQualifiedDomainName, specify the domain name of the + // server (such as us-east-1-www.example.com), not the name of the resource + // record sets (example.com). + // + // In this configuration, if you create a health check for which the value + // of FullyQualifiedDomainName matches the name of the resource record sets + // and then associate the health check with those resource record sets, health + // check results will be unpredictable. 
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the status of the specified +// health check. +type GetHealthCheckStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your hosted zones, send a GET request to the /Route +// 53 API version/hostedzonecount resource. +type GetHostedZoneCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHostedZoneCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of hosted zones associated with the +// current AWS account. +type GetHostedZoneCountOutput struct { + _ struct{} `type:"structure"` + + // The number of hosted zones associated with the current AWS account. + HostedZoneCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHostedZoneCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneCountOutput) GoString() string { + return s.String() +} + +// The input for a GetHostedZone request. +type GetHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to get a list of the name servers + // in the delegation set. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified hosted zone. +type GetHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the name servers for the specified + // hosted zone. + DelegationSet *DelegationSet `type:"structure"` + + // A complex type that contains the information about the specified hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // A complex type that contains information about VPCs associated with the specified + // hosted zone. + VPCs []*VPC `locationNameList:"VPC" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneOutput) GoString() string { + return s.String() +} + +// The input for a GetReusableDelegationSet request. 
+type GetReusableDelegationSetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the reusable delegation set for which you want to get a list of
+ // the name servers.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetReusableDelegationSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetReusableDelegationSetInput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing information about the specified reusable delegation
+// set.
+type GetReusableDelegationSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains the information about the name servers for the
+ // specified delegation set ID.
+ DelegationSet *DelegationSet `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetReusableDelegationSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetReusableDelegationSetOutput) GoString() string {
+ return s.String()
+}
+
+// Gets information about a specific traffic policy version. To get the information,
+// send a GET request to the /Route 53 API version/trafficpolicy resource, and
+// specify the ID and the version of the traffic policy.
+type GetTrafficPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the traffic policy that you want to get information about.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The version number of the traffic policy that you want to get information
+ // about.
+ Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s GetTrafficPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTrafficPolicyInput) GoString() string {
+ return s.String()
+}
+
+// To retrieve a count of all your traffic policy instances, send a GET request
+// to the /Route 53 API version/trafficpolicyinstancecount resource.
+type GetTrafficPolicyInstanceCountInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetTrafficPolicyInstanceCountInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTrafficPolicyInstanceCountInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the number of traffic policy
+// instances that are associated with the current AWS account.
+type GetTrafficPolicyInstanceCountOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of traffic policy instances that are associated with the current
+ // AWS account.
+ TrafficPolicyInstanceCount *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s GetTrafficPolicyInstanceCountOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTrafficPolicyInstanceCountOutput) GoString() string {
+ return s.String()
+}
+
+// Gets information about a specified traffic policy instance.
+//
+// To get information about a traffic policy instance, send a GET request to
+// the /Route 53 API version/trafficpolicyinstance/Id resource.
+type GetTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type GetTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type GetTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains identifying information about the health check. +type HealthCheck struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the health check. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains the health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` + + // The version of the health check. You can optionally pass this value in a + // call to UpdateHealthCheck to prevent overwriting another change to the health + // check. + HealthCheckVersion *int64 `min:"1" type:"long" required:"true"` + + // The ID of the specified health check. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HealthCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheck) GoString() string { + return s.String() +} + +// A complex type that contains the health check configuration. +type HealthCheckConfig struct { + _ struct{} `type:"structure"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName + // to the endpoint in the client_hello message during TLS negotiation. If you + // don't specify a value for EnableSNI, Amazon Route 53 defaults to true when + // Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other + // value. 
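+ //
+ // Editor's note (illustrative sketch, not generated AWS documentation): a
+ // HealthCheckConfig is typically built as a literal and passed to
+ // CreateHealthCheck. The endpoint values below are placeholders, and the
+ // client is assumed to be built with route53.New(session.New()).
+ //
+ //   cfg := &route53.HealthCheckConfig{
+ //       Type:             aws.String("HTTP"),       // TCP | HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH
+ //       IPAddress:        aws.String("192.0.2.10"), // placeholder endpoint
+ //       Port:             aws.Int64(80),
+ //       ResourcePath:     aws.String("/health"),
+ //       FailureThreshold: aws.Int64(3),
+ //       RequestInterval:  aws.Int64(30),
+ //   }
+ //   _, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
+ //       CallerReference:   aws.String("health-check-2016-06-01"), // must be unique per request
+ //       HealthCheckConfig: cfg,
+ //   })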
+ EnableSNI *bool `type:"boolean"`
+
+ // The number of consecutive health checks that an endpoint must pass or fail
+ // for Amazon Route 53 to change the current status of the endpoint from unhealthy
+ // to healthy or vice versa.
+ //
+ // Valid values are integers between 1 and 10. For more information, see "How
+ // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon
+ // Route 53 Developer Guide.
+ FailureThreshold *int64 `min:"1" type:"integer"`
+
+ // Fully qualified domain name of the instance to be health checked.
+ FullyQualifiedDomainName *string `type:"string"`
+
+ // The minimum number of child health checks that must be healthy for Amazon
+ // Route 53 to consider the parent health check to be healthy. Valid values
+ // are integers between 0 and 256, inclusive.
+ HealthThreshold *int64 `type:"integer"`
+
+ // IP address of the instance being checked.
+ IPAddress *string `type:"string"`
+
+ // A Boolean value that indicates whether the status of the health check should
+ // be inverted. For example, if a health check is healthy but Inverted is True,
+ // then Amazon Route 53 considers the health check to be unhealthy.
+ Inverted *bool `type:"boolean"`
+
+ // A Boolean value that indicates whether you want Amazon Route 53 to measure
+ // the latency between health checkers in multiple AWS regions and your endpoint
+ // and to display CloudWatch latency graphs in the Amazon Route 53 console.
+ MeasureLatency *bool `type:"boolean"`
+
+ // The port on which Amazon Route 53 opens a connection to the instance for the
+ // health check. For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is
+ // not specified. For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port
+ // is not specified.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The number of seconds between the time that Amazon Route 53 gets a response
+ // from your endpoint and the time that it sends the next health-check request.
+ //
+ // Each Amazon Route 53 health checker makes requests at this interval. Valid
+ // values are 10 and 30. The default value is 30.
+ RequestInterval *int64 `min:"10" type:"integer"`
+
+ // The path to request on the instance to check its health. Required for HTTP,
+ // HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request
+ // is issued to the instance on the given port and path.
+ ResourcePath *string `type:"string"`
+
+ // A string to search for in the body of a health check response. Required for
+ // HTTP_STR_MATCH and HTTPS_STR_MATCH health checks. Amazon Route 53 considers
+ // case when searching for SearchString in the response body.
+ SearchString *string `type:"string"`
+
+ // The type of health check to be performed. Currently supported types are TCP,
+ // HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH.
+ Type *string `type:"string" required:"true" enum:"HealthCheckType"`
+}
+
+// String returns the string representation
+func (s HealthCheckConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckConfig) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the IP address of an Amazon Route 53 health checker
+// and the reason for the health check status.
+type HealthCheckObservation struct {
+ _ struct{} `type:"structure"`
+
+ // The IP address of the Amazon Route 53 health checker that performed the health
+ // check.
+ IPAddress *string `type:"string"`
+
+ // A complex type that contains information about the health check status for
+ // the current observation.
+ StatusReport *StatusReport `type:"structure"`
+}
+
+// String returns the string representation
+func (s HealthCheckObservation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckObservation) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the specified hosted zone.
+type HostedZone struct {
+ _ struct{} `type:"structure"`
+
+ // A unique string that identifies the request to create the hosted zone.
+ CallerReference *string `min:"1" type:"string" required:"true"`
+
+ // A complex type that contains the Comment element.
+ Config *HostedZoneConfig `type:"structure"`
+
+ // The ID of the specified hosted zone.
+ Id *string `type:"string" required:"true"`
+
+ // The name of the domain. This must be a fully-specified domain, for example,
+ // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that
+ // the domain name is fully qualified. This means that Amazon Route 53 treats
+ // www.example.com (without a trailing dot) and www.example.com. (with a trailing
+ // dot) as identical.
+ //
+ // This is the name you have registered with your DNS registrar. You should
+ // ask your registrar to change the authoritative name servers for your domain
+ // to the set of NameServers elements returned in DelegationSet.
+ Name *string `type:"string" required:"true"`
+
+ // Total number of resource record sets in the hosted zone.
+ ResourceRecordSetCount *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s HostedZone) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostedZone) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains an optional comment about your hosted zone.
+// If you don't want to specify a comment, you can omit the HostedZoneConfig
+// and Comment elements from the XML document.
+type HostedZoneConfig struct {
+ _ struct{} `type:"structure"`
+
+ // An optional comment about your hosted zone. If you don't want to specify
+ // a comment, you can omit the HostedZoneConfig and Comment elements from the
+ // XML document.
+ Comment *string `type:"string"`
+
+ PrivateZone *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s HostedZoneConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostedZoneConfig) GoString() string {
+ return s.String()
+}
+
+// The input for a ListChangeBatchesByHostedZone request.
+type ListChangeBatchesByHostedZoneInput struct {
+ _ struct{} `deprecated:"true" type:"structure"`
+
+ // The end of the time period you want to see changes for.
+ EndDate *string `location:"querystring" locationName:"endDate" deprecated:"true" type:"string" required:"true"`
+
+ // The ID of the hosted zone that you want to see changes for.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The page marker.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"`
+
+ // The start of the time period you want to see changes for.
+ StartDate *string `location:"querystring" locationName:"startDate" deprecated:"true" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// The output for a ListChangeBatchesByHostedZone request.
+type ListChangeBatchesByHostedZoneOutput struct {
+ _ struct{} `deprecated:"true" type:"structure"`
+
+ // The change batches within the given hosted zone and time period.
+ ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" deprecated:"true" type:"list" required:"true"`
+
+ // A flag that indicates if there are more change batches to list.
+ IsTruncated *bool `type:"boolean"`
+
+ // The page marker.
+ Marker *string `type:"string" required:"true"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `type:"string" required:"true"`
+
+ // The next page marker.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByHostedZoneOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByHostedZoneOutput) GoString() string {
+ return s.String()
+}
+
+// The input for a ListChangeBatchesByRRSet request.
+type ListChangeBatchesByRRSetInput struct {
+ _ struct{} `deprecated:"true" type:"structure"`
+
+ // The end of the time period you want to see changes for.
+ EndDate *string `location:"querystring" locationName:"endDate" deprecated:"true" type:"string" required:"true"`
+
+ // The ID of the hosted zone that you want to see changes for.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The page marker.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The maximum number of items on a page.
+ MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"`
+
+ // The name of the RRSet that you want to see changes for.
+ Name *string `location:"querystring" locationName:"rrSet_name" type:"string" required:"true"`
+
+ // The identifier of the RRSet that you want to see changes for.
+ SetIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"`
+
+ // The start of the time period you want to see changes for.
+ StartDate *string `location:"querystring" locationName:"startDate" deprecated:"true" type:"string" required:"true"`
+
+ // The type of the RRSet that you want to see changes for.
+ Type *string `location:"querystring" locationName:"type" type:"string" required:"true" enum:"RRType"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByRRSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByRRSetInput) GoString() string {
+ return s.String()
+}
+
+// The output for a ListChangeBatchesByRRSet request.
+type ListChangeBatchesByRRSetOutput struct {
+ _ struct{} `deprecated:"true" type:"structure"`
+
+ // The change batches within the given hosted zone and time period.
+ ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" deprecated:"true" type:"list" required:"true"`
+
+ // A flag that indicates if there are more change batches to list.
+ IsTruncated *bool `type:"boolean"`
+
+ // The page marker.
+ Marker *string `type:"string" required:"true"` + + // The maximum number of items on a page. + MaxItems *string `type:"string" required:"true"` + + // The next page marker. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListChangeBatchesByRRSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByRRSetOutput) GoString() string { + return s.String() +} + +// The input for a ListGeoLocations request. +type ListGeoLocationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of geo locations you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The first continent code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. For non-continent geo locations, + // this should be null. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + StartContinentCode *string `location:"querystring" locationName:"startcontinentcode" min:"2" type:"string"` + + // The first country code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + StartCountryCode *string `location:"querystring" locationName:"startcountrycode" min:"1" type:"string"` + + // The first subdivision code in the lexicographic ordering of geo locations + // that you want the ListGeoLocations request to list. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the geo locations that are +// returned by the request and information about the response. +type ListGeoLocationsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the geo locations that are + // returned by the request. + GeoLocationDetailsList []*GeoLocationDetails `locationNameList:"GeoLocationDetails" type:"list" required:"true"` + + // A flag that indicates whether there are more geo locations to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the values included in the ListGeoLocationsResponse$NextContinentCode, + // ListGeoLocationsResponse$NextCountryCode and ListGeoLocationsResponse$NextSubdivisionCode + // elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // If the results were truncated, the continent code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is a continent location. 
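+ //
+ // Editor's note (illustrative sketch, not generated AWS documentation): the
+ // three Next* codes below feed the matching Start* parameters of the next
+ // request, assuming a client built with route53.New(session.New()):
+ //
+ //   in := &route53.ListGeoLocationsInput{MaxItems: aws.String("100")}
+ //   for {
+ //       out, err := svc.ListGeoLocations(in)
+ //       if err != nil {
+ //           log.Fatal(err)
+ //       }
+ //       fmt.Println("got", len(out.GeoLocationDetailsList), "geo locations")
+ //       if !aws.BoolValue(out.IsTruncated) {
+ //           break
+ //       }
+ //       // Only the markers returned for the next location will be non-nil.
+ //       in.StartContinentCode = out.NextContinentCode
+ //       in.StartCountryCode = out.NextCountryCode
+ //       in.StartSubdivisionCode = out.NextSubdivisionCode
+ //   }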
+ NextContinentCode *string `min:"2" type:"string"` + + // If the results were truncated, the country code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is not a continent location. + NextCountryCode *string `min:"1" type:"string"` + + // If the results were truncated, the subdivision code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location has a subdivision. + NextSubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your health checks, send a GET request to the /Route +// 53 API version/healthcheck resource. The response to this request includes +// a HealthChecks element with zero or more HealthCheck child elements. By default, +// the list of health checks is displayed on a single page. You can control +// the length of the page that is displayed by using the MaxItems parameter. +// You can use the Marker parameter to control the health check that the list +// begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHealthChecksInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of health checks to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHealthChecksOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the health checks associated + // with the current AWS account. + HealthChecks []*HealthCheck `locationNameList:"HealthCheck" type:"list" required:"true"` + + // A flag indicating whether there are more health checks to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of health checks to be included in the response body. + // If the number of health checks associated with this AWS account exceeds MaxItems, + // the value of ListHealthChecksResponse$IsTruncated in the response is true. 
+ // Call ListHealthChecks again and specify the value of ListHealthChecksResponse$NextMarker
+ // in the ListHealthChecksRequest$Marker element to get the next page of results.
+ MaxItems *string `type:"string" required:"true"`
+
+ // Indicates where to continue listing health checks. If ListHealthChecksResponse$IsTruncated
+ // is true, make another request to ListHealthChecks and include the value of
+ // the NextMarker element in the Marker element to get the next page of results.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListHealthChecksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListHealthChecksOutput) GoString() string {
+ return s.String()
+}
+
+// To retrieve a list of your hosted zones in lexicographic order, send a GET
+// request to the /Route 53 API version/hostedzonesbyname resource. The response
+// to this request includes a HostedZones element with zero or more HostedZone
+// child elements lexicographically ordered by DNS name. By default, the list
+// of hosted zones is displayed on a single page. You can control the length
+// of the page that is displayed by using the MaxItems parameter. You can use
+// the DNSName and HostedZoneId parameters to control the hosted zone that the
+// list begins with.
+//
+// For more information about listing hosted zones, see Listing the Hosted
+// Zones for an AWS Account (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html)
+// in the Amazon Route 53 Developer Guide.
+type ListHostedZonesByNameInput struct {
+ _ struct{} `type:"structure"`
+
+ // The first name in the lexicographic ordering of domain names that you want
+ // the ListHostedZonesByNameRequest request to list.
+ //
+ // If the request returned more than one page of results, submit another request
+ // and specify the value of NextDNSName and NextHostedZoneId from the last response
+ // in the DNSName and HostedZoneId parameters to get the next page of results.
+ DNSName *string `location:"querystring" locationName:"dnsname" type:"string"`
+
+ // If the request returned more than one page of results, submit another request
+ // and specify the value of NextDNSName and NextHostedZoneId from the last response
+ // in the DNSName and HostedZoneId parameters to get the next page of results.
+ HostedZoneId *string `location:"querystring" locationName:"hostedzoneid" type:"string"`
+
+ // Specify the maximum number of hosted zones to return per page of results.
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+}
+
+// String returns the string representation
+func (s ListHostedZonesByNameInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListHostedZonesByNameInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the response for the request.
+type ListHostedZonesByNameOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The DNSName value sent in the request.
+ DNSName *string `type:"string"`
+
+ // The HostedZoneId value sent in the request.
+ HostedZoneId *string `type:"string"`
+
+ // A complex type that contains information about the hosted zones associated
+ // with the current AWS account.
+ HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"`
+
+ // A flag indicating whether there are more hosted zones to be listed.
If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the NextDNSName and NextHostedZoneId elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesByNameResponse$IsTruncated in the response is + // true. Call ListHostedZonesByName again and specify the value of ListHostedZonesByNameResponse$NextDNSName + // and ListHostedZonesByNameResponse$NextHostedZoneId elements respectively + // to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextDNSName *string `type:"string"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextHostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones, send a GET request to the /Route +// 53 API version/hostedzone resource. The response to this request includes +// a HostedZones element with zero or more HostedZone child elements. By default, +// the list of hosted zones is displayed on a single page. You can control the +// length of the page that is displayed by using the MaxItems parameter. You +// can use the Marker parameter to control the hosted zone that the list begins +// with. For more information about listing hosted zones, see Listing the Hosted +// Zones for an AWS Account (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHostedZonesInput struct { + _ struct{} `type:"structure"` + + DelegationSetId *string `location:"querystring" locationName:"delegationsetid" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. 
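+ //
+ // Editor's note (illustrative sketch, not generated AWS documentation):
+ // marker-based paging over all hosted zones might look like this, assuming
+ // the usual fmt/log imports and a client built with route53.New(session.New()):
+ //
+ //   in := &route53.ListHostedZonesInput{MaxItems: aws.String("100")}
+ //   for {
+ //       out, err := svc.ListHostedZones(in)
+ //       if err != nil {
+ //           log.Fatal(err)
+ //       }
+ //       for _, zone := range out.HostedZones {
+ //           fmt.Println(aws.StringValue(zone.Name))
+ //       }
+ //       if !aws.BoolValue(out.IsTruncated) {
+ //           break
+ //       }
+ //       in.Marker = out.NextMarker
+ //   }
+ //
+ // The generated ListHostedZonesPages helper wraps the same loop, if it is
+ // present in this SDK version.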
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesResponse$IsTruncated in the response is true. + // Call ListHostedZones again and specify the value of ListHostedZonesResponse$NextMarker + // in the ListHostedZonesRequest$Marker element to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing hosted zones. If ListHostedZonesResponse$IsTruncated + // is true, make another request to ListHostedZones and include the value of + // the NextMarker element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesOutput) GoString() string { + return s.String() +} + +// The input for a ListResourceRecordSets request. +type ListResourceRecordSetsInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to get. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of records you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, specify the value of ListResourceRecordSetsResponse$NextRecordIdentifier + // from the previous response to get the next resource record set that has the + // current DNS name and type. + StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListResourceRecordSets request to list. + StartRecordName *string `location:"querystring" locationName:"name" type:"string"` + + // The DNS type at which to begin the listing of resource record sets. 
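+ //
+ // Editor's note (illustrative sketch, not generated AWS documentation):
+ // listing the record sets in a zone and following the Next* markers might
+ // look like this (the zone ID is a placeholder, and the client is assumed
+ // to be built with route53.New(session.New())):
+ //
+ //   in := &route53.ListResourceRecordSetsInput{
+ //       HostedZoneId: aws.String("Z1D633PJN98FT9"), // placeholder zone ID
+ //   }
+ //   for {
+ //       out, err := svc.ListResourceRecordSets(in)
+ //       if err != nil {
+ //           log.Fatal(err)
+ //       }
+ //       for _, rrset := range out.ResourceRecordSets {
+ //           fmt.Println(aws.StringValue(rrset.Name), aws.StringValue(rrset.Type))
+ //       }
+ //       if !aws.BoolValue(out.IsTruncated) {
+ //           break
+ //       }
+ //       in.StartRecordName = out.NextRecordName
+ //       in.StartRecordType = out.NextRecordType
+ //       in.StartRecordIdentifier = out.NextRecordIdentifier
+ //   }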
+ // + // Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT + // + // Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Alias Resource Record Sets: A | AAAA + // + // Constraint: Specifying type without specifying name returns an InvalidInput + // error. + StartRecordType *string `location:"querystring" locationName:"type" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// are returned by the request and information about the response. +type ListResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more resource record sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the ListResourceRecordSetsResponse$NextRecordName + // element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, the value of SetIdentifier for the next resource record + // set that has the current DNS name and type. + NextRecordIdentifier *string `min:"1" type:"string"` + + // If the results were truncated, the name of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. + NextRecordName *string `type:"string"` + + // If the results were truncated, the type of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. + NextRecordType *string `type:"string" enum:"RRType"` + + // A complex type that contains information about the resource record sets that + // are returned by the request. + ResourceRecordSets []*ResourceRecordSet `locationNameList:"ResourceRecordSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the /Route 53 API version/delegationset resource. The response to this request +// includes a DelegationSets element with zero or more DelegationSet child elements. +// By default, the list of reusable delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. 
+type ListReusableDelegationSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // If the request returned more than one page of results, submit another request
+ // and specify the value of NextMarker from the last response in the marker
+ // parameter to get the next page of results.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Specify the maximum number of reusable delegation sets to return per page
+ // of results.
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+}
+
+// String returns the string representation
+func (s ListReusableDelegationSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReusableDelegationSetsInput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the response for the request.
+type ListReusableDelegationSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the reusable delegation sets
+ // associated with the current AWS account.
+ DelegationSets []*DelegationSet `locationNameList:"DelegationSet" type:"list" required:"true"`
+
+ // A flag indicating whether there are more reusable delegation sets to be listed.
+ // If your results were truncated, you can make a follow-up request for the
+ // next page of results by using the Marker element.
+ //
+ // Valid Values: true | false
+ IsTruncated *bool `type:"boolean" required:"true"`
+
+ // If the request returned more than one page of results, submit another request
+ // and specify the value of NextMarker from the last response in the marker
+ // parameter to get the next page of results.
+ Marker *string `type:"string" required:"true"`
+
+ // The maximum number of reusable delegation sets to be included in the response
+ // body. If the number of reusable delegation sets associated with this AWS
+ // account exceeds MaxItems, the value of ListReusableDelegationSetsResponse$IsTruncated
+ // in the response is true. Call ListReusableDelegationSets again and specify
+ // the value of ListReusableDelegationSetsResponse$NextMarker in the ListReusableDelegationSetsRequest$Marker
+ // element to get the next page of results.
+ MaxItems *string `type:"string" required:"true"`
+
+ // Indicates where to continue listing reusable delegation sets. If ListReusableDelegationSetsResponse$IsTruncated
+ // is true, make another request to ListReusableDelegationSets and include the
+ // value of the NextMarker element in the Marker element to get the next page
+ // of results.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListReusableDelegationSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReusableDelegationSetsOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type containing information about a request for a list of the tags
+// that are associated with an individual resource.
+type ListTagsForResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the resource for which you want to retrieve tags.
+ ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"`
+
+ // The type of the resource.
+ //
+ // - The resource type for health checks is healthcheck.
+ //
+ // - The resource type for hosted zones is hostedzone.
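+ //
+ // Editor's note (illustrative sketch, not generated AWS documentation):
+ // fetching the tags on a single hosted zone might look like this (the zone
+ // ID is a placeholder, and the client is assumed to be built with
+ // route53.New(session.New())):
+ //
+ //   out, err := svc.ListTagsForResource(&route53.ListTagsForResourceInput{
+ //       ResourceType: aws.String("hostedzone"),
+ //       ResourceId:   aws.String("Z1D633PJN98FT9"), // placeholder zone ID
+ //   })
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   for _, tag := range out.ResourceTagSet.Tags {
+ //       fmt.Println(aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
+ //   }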
+ ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// A complex type containing tags for the specified resource. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A ResourceTagSet containing tags associated with the specified resource. + ResourceTagSet *ResourceTagSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request for a list of the tags +// that are associated with up to 10 specified resources. +type ListTagsForResourcesInput struct { + _ struct{} `locationName:"ListTagsForResourcesRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains the ResourceId element for each resource for + // which you want to get a list of tags. + ResourceIds []*string `locationNameList:"ResourceId" min:"1" type:"list" required:"true"` + + // The type of the resources. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesInput) GoString() string { + return s.String() +} + +// A complex type containing tags for the specified resources. +type ListTagsForResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of ResourceTagSets containing tags associated with the specified resources. + ResourceTagSets []*ResourceTagSet `locationNameList:"ResourceTagSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list the +// traffic policies that are associated with the current AWS account. +type ListTrafficPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of traffic policies to be included in the response body + // for this request. If you have more than MaxItems traffic policies, the value + // of the IsTruncated element in the response is true, and the value of the + // TrafficPolicyIdMarker element is the ID of the first traffic policy in the + // next group of MaxItems traffic policies. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker + // parameter. + // + // If you have more traffic policies than the value of MaxItems, ListTrafficPolicies + // returns only the first MaxItems traffic policies. 
To get the next group of + // MaxItems policies, submit another request to ListTrafficPolicies. For the + // value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker + // element that was returned in the previous response. + // + // Policies are listed in the order in which they were created. + TrafficPolicyIdMarker *string `location:"querystring" locationName:"trafficpolicyid" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of MaxItems traffic + // policies by calling ListTrafficPolicies again and specifying the value of + // the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the + // first traffic policy in the next group of MaxItems traffic policies. + TrafficPolicyIdMarker *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicySummary element for each traffic policy + // that was created by the current AWS account. + TrafficPolicySummaries []*TrafficPolicySummary `locationNameList:"TrafficPolicySummary" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesOutput) GoString() string { + return s.String() +} + +// A request for the traffic policy instances that you created in a specified +// hosted zone. +type ListTrafficPolicyInstancesByHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to list traffic policy instances. + HostedZoneId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. 
+ // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone + // again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. 
+type ListTrafficPolicyInstancesByPolicyInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, HostedZoneIdMarker + // is the ID of the hosted zone for the first traffic policy instance in the + // next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The ID of the traffic policy for which you want to list traffic policy instances. + TrafficPolicyId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` + + // The version of the traffic policy for which you want to list traffic policy + // instances. The version must be associated with the traffic policy that is + // specified by TrafficPolicyId. + TrafficPolicyVersion *int64 `location:"querystring" locationName:"version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. 
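+//
+// Editor's note (illustrative sketch, not generated AWS documentation): paging
+// through the instances created from one traffic policy version might look
+// like this (the policy ID is a placeholder, and the client is assumed to be
+// built with route53.New(session.New())):
+//
+//   in := &route53.ListTrafficPolicyInstancesByPolicyInput{
+//       TrafficPolicyId:      aws.String("12345678-abcd-1234-abcd-123456789012"), // placeholder
+//       TrafficPolicyVersion: aws.Int64(1),
+//   }
+//   for {
+//       out, err := svc.ListTrafficPolicyInstancesByPolicy(in)
+//       if err != nil {
+//           log.Fatal(err)
+//       }
+//       for _, inst := range out.TrafficPolicyInstances {
+//           fmt.Println(aws.StringValue(inst.Name))
+//       }
+//       if !aws.BoolValue(out.IsTruncated) {
+//           break
+//       }
+//       in.HostedZoneIdMarker = out.HostedZoneIdMarker
+//       in.TrafficPolicyInstanceNameMarker = out.TrafficPolicyInstanceNameMarker
+//       in.TrafficPolicyInstanceTypeMarker = out.TrafficPolicyInstanceTypeMarker
+//   }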
+type ListTrafficPolicyInstancesByPolicyOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again + // and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, you have + // more traffic policy instances. To get the next group of MaxItems traffic + // policy instances, submit another ListTrafficPolicyInstances request. For + // the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker + // from the previous response, which is the hosted zone ID of the first traffic + // policy instance in the next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. 
If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstances again and + // specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. 
+ TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policies. +type ListTrafficPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Specify the value of Id of the traffic policy for which you want to list + // all versions. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of traffic policy versions that you want Amazon Route + // 53 to include in the response body for this request. If the specified traffic + // policy has more than MaxItems versions, the value of the IsTruncated element + // in the response is true, and the value of the TrafficPolicyVersionMarker + // element is the ID of the first version in the next group of MaxItems traffic + // policy versions. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker + // parameter. + // + // If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions + // returns only the first group of MaxItems versions. To get the next group + // of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. + // For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker + // element that was returned in the previous response. + // + // Traffic policy versions are listed in sequential order. + TrafficPolicyVersionMarker *string `location:"querystring" locationName:"trafficpolicyversion" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of maxitems traffic + // policies by calling ListTrafficPolicyVersions again and specifying the value + // of the NextMarker element in the marker parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicy element for each traffic policy version + // that is associated with the specified traffic policy. 
+ TrafficPolicies []*TrafficPolicy `locationNameList:"TrafficPolicy" type:"list" required:"true"` + + // If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies + // the first traffic policy in the next group of MaxItems traffic policies. + // Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker + // in the TrafficPolicyVersionMarker request parameter. + // + // This element is present only if IsTruncated is true. + TrafficPolicyVersionMarker *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsOutput) GoString() string { + return s.String() +} + +// A complex type that contains the value of the Value element for the current +// resource record set. +type ResourceRecord struct { + _ struct{} `type:"structure"` + + // The current or new DNS record value, not to exceed 4,000 characters. In the + // case of a DELETE action, if the current value does not match the actual value, + // an error is returned. For descriptions about how to format Value for different + // record types, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // You can specify more than one value for all record types except CNAME and + // SOA. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecord) GoString() string { + return s.String() +} + +// A complex type that contains information about the current resource record +// set. +type ResourceRecordSet struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: Information about the AWS resource to which + // you are redirecting traffic. + AliasTarget *AliasTarget `type:"structure"` + + // Failover resource record sets only: To configure failover, you add the Failover + // element to two resource record sets. For one resource record set, you specify + // PRIMARY as the value for Failover; for the other resource record set, you + // specify SECONDARY. In addition, you include the HealthCheckId element and + // specify the health check that you want Amazon Route 53 to perform for each + // resource record set. + // + // You can create failover and failover alias resource record sets only in + // public hosted zones. Except where noted, the following failover behaviors + // assume that you have included the HealthCheckId element in both resource + // record sets: + // + // When the primary resource record set is healthy, Amazon Route 53 responds + // to DNS queries with the applicable value from the primary resource record + // set regardless of the health of the secondary resource record set. When the + // primary resource record set is unhealthy and the secondary resource record + // set is healthy, Amazon Route 53 responds to DNS queries with the applicable + // value from the secondary resource record set. When the secondary resource + // record set is unhealthy, Amazon Route 53 responds to DNS queries with the + // applicable value from the primary resource record set regardless of the health + // of the primary resource record set. 
If you omit the HealthCheckId element + // for the secondary resource record set, and if the primary resource record + // set is unhealthy, Amazon Route 53 always responds to DNS queries with the + // applicable value from the secondary resource record set. This is true regardless + // of the health of the associated endpoint. You cannot create non-failover + // resource record sets that have the same values for the Name and Type elements + // as failover resource record sets. + // + // For failover alias resource record sets, you must also include the EvaluateTargetHealth + // element and set the value to true. + // + // For more information about configuring failover for Amazon Route 53, see + // Amazon Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. + // + // Valid values: PRIMARY | SECONDARY + Failover *string `type:"string" enum:"ResourceRecordSetFailover"` + + // Geo location resource record sets only: A complex type that lets you control + // how Amazon Route 53 responds to DNS queries based on the geographic origin + // of the query. For example, if you want all queries from Africa to be routed + // to a web server with an IP address of 192.0.2.111, create a resource record + // set with a Type of A and a ContinentCode of AF. + // + // You can create geolocation and geolocation alias resource record sets only + // in public hosted zones. If you create separate resource record sets for overlapping + // geographic regions (for example, one resource record set for a continent + // and one for a country on the same continent), priority goes to the smallest + // geographic region. This allows you to route most queries for a continent + // to one resource and to route queries for a country on that continent to a + // different resource. + // + // You cannot create two geolocation resource record sets that specify the + // same geographic location. + // + // The value * in the CountryCode element matches all geographic locations + // that aren't specified in other geolocation resource record sets that have + // the same values for the Name and Type elements. + // + // Geolocation works by mapping IP addresses to locations. However, some IP + // addresses aren't mapped to geographic locations, so even if you create geolocation + // resource record sets that cover all seven continents, Amazon Route 53 will + // receive some DNS queries from locations that it can't identify. We recommend + // that you create a resource record set for which the value of CountryCode + // is *, which handles both queries that come from locations for which you haven't + // created geolocation resource record sets and queries from IP addresses that + // aren't mapped to a location. If you don't create a * resource record set, + // Amazon Route 53 returns a "no answer" response for queries from those locations. + // You cannot create non-geolocation resource record sets that have the same + // values for the Name and Type elements as geolocation resource record sets. + GeoLocation *GeoLocation `type:"structure"` + + // Health Check resource record sets only, not required for alias resource record + // sets: An identifier that is used to identify health check associated with + // the resource record set. + HealthCheckId *string `type:"string"` + + // The name of the domain you want to perform the action on. + // + // Enter a fully qualified domain name, for example, www.example.com. 
You can + // optionally include a trailing dot. If you omit the trailing dot, Amazon Route + // 53 still assumes that the domain name that you specify is fully qualified. + // This means that Amazon Route 53 treats www.example.com (without a trailing + // dot) and www.example.com. (with a trailing dot) as identical. + // + // For information about how to specify characters other than a-z, 0-9, and + // - (hyphen) and how to specify internationalized domain names, see DNS Domain + // Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html) + // in the Amazon Route 53 Developer Guide. + // + // You can use an asterisk (*) character in the name. DNS treats the * character + // either as a wildcard or as the * character (ASCII 42), depending on where + // it appears in the name. For more information, see Using an Asterisk (*) in + // the Names of Hosted Zones and Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-asterisk) + // in the Amazon Route 53 Developer Guide + // + // You can't use the * wildcard for resource records sets that have a type + // of NS. + Name *string `type:"string" required:"true"` + + // Latency-based resource record sets only: The Amazon EC2 region where the + // resource that is specified in this resource record set resides. The resource + // typically is an AWS resource, such as an Amazon EC2 instance or an ELB load + // balancer, and is referred to by an IP address or a DNS domain name, depending + // on the record type. + // + // You can create latency and latency alias resource record sets only in public + // hosted zones. When Amazon Route 53 receives a DNS query for a domain name + // and type for which you have created latency resource record sets, Amazon + // Route 53 selects the latency resource record set that has the lowest latency + // between the end user and the associated Amazon EC2 region. Amazon Route 53 + // then returns the value that is associated with the selected resource record + // set. + // + // Note the following: + // + // You can only specify one ResourceRecord per latency resource record set. + // You can only create one latency resource record set for each Amazon EC2 region. + // You are not required to create latency resource record sets for all Amazon + // EC2 regions. Amazon Route 53 will choose the region with the best latency + // from among the regions for which you create latency resource record sets. + // You cannot create non-latency resource record sets that have the same values + // for the Name and Type elements as latency resource record sets. + Region *string `min:"1" type:"string" enum:"ResourceRecordSetRegion"` + + // A complex type that contains the resource records for the current resource + // record set. + ResourceRecords []*ResourceRecord `locationNameList:"ResourceRecord" min:"1" type:"list"` + + // Weighted, Latency, Geo, and Failover resource record sets only: An identifier + // that differentiates among multiple resource record sets that have the same + // combination of DNS name and type. The value of SetIdentifier must be unique + // for each resource record set that has the same combination of DNS name and + // type. + SetIdentifier *string `min:"1" type:"string"` + + // The cache time to live for the current resource record set. Note the following: + // + // If you're creating an alias resource record set, omit TTL. Amazon Route + // 53 uses the value of TTL for the alias target. 
If you're associating this + // resource record set with a health check (if you're adding a HealthCheckId + // element), we recommend that you specify a TTL of 60 seconds or less so clients + // respond quickly to changes in health status. All of the resource record sets + // in a group of weighted, latency, geolocation, or failover resource record + // sets must have the same value for TTL. If a group of weighted resource record + // sets includes one or more weighted alias resource record sets for which the + // alias target is an ELB load balancer, we recommend that you specify a TTL + // of 60 seconds for all of the non-alias weighted resource record sets that + // have the same name and type. Values other than 60 seconds (the TTL for load + // balancers) will change the effect of the values that you specify for Weight. + TTL *int64 `type:"long"` + + TrafficPolicyInstanceId *string `type:"string"` + + // The DNS record type. For information about different record types and how + // data is encoded for them, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS + // | PTR | SOA | SPF | SRV | TXT + // + // Values for weighted, latency, geolocation, and failover resource record + // sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. When creating a group + // of weighted, latency, geolocation, or failover resource record sets, specify + // the same value for all of the resource record sets in the group. + // + // SPF records were formerly used to verify the identity of the sender of email + // messages. However, we no longer recommend that you create resource record + // sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework + // (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated + // to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to + // some interoperability issues. Accordingly, its use is no longer appropriate + // for SPF version 1; implementations are not to use it." In RFC 7208, see section + // 14.1, The SPF DNS Record Type (http://tools.ietf.org/html/rfc7208#section-14.1). + // Values for alias resource record sets: + // + // CloudFront distributions: A ELB load balancers: A | AAAA Amazon S3 buckets: + // A Another resource record set in this hosted zone: Specify the type of the + // resource record set for which you're creating the alias. Specify any value + // except NS or SOA. + Type *string `type:"string" required:"true" enum:"RRType"` + + // Weighted resource record sets only: Among resource record sets that have + // the same combination of DNS name and type, a value that determines the proportion + // of DNS queries that Amazon Route 53 responds to using the current resource + // record set. Amazon Route 53 calculates the sum of the weights for the resource + // record sets that have the same combination of DNS name and type. Amazon Route + // 53 then responds to queries based on the ratio of a resource's weight to + // the total. Note the following: + // + // You must specify a value for the Weight element for every weighted resource + // record set. You can only specify one ResourceRecord per weighted resource + // record set. You cannot create latency, failover, or geolocation resource + // record sets that have the same values for the Name and Type elements as weighted + // resource record sets. 
You can create a maximum of 100 weighted resource record + // sets that have the same values for the Name and Type elements. For weighted + // (but not weighted alias) resource record sets, if you set Weight to 0 for + // a resource record set, Amazon Route 53 never responds to queries with the + // applicable value for that resource record set. However, if you set Weight + // to 0 for all resource record sets that have the same combination of DNS name + // and type, traffic is routed to all resources with equal probability. + // + // The effect of setting Weight to 0 is different when you associate health + // checks with weighted resource record sets. For more information, see Options + // for Configuring Amazon Route 53 Active-Active and Active-Passive Failover + // (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) + // in the Amazon Route 53 Developer Guide. + Weight *int64 `type:"long"` +} + +// String returns the string representation +func (s ResourceRecordSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecordSet) GoString() string { + return s.String() +} + +// A complex type containing a resource and its associated tags. +type ResourceTagSet struct { + _ struct{} `type:"structure"` + + // The ID for the specified resource. + ResourceId *string `type:"string"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `type:"string" enum:"TagResourceType"` + + // The tags associated with the specified resource. + Tags []*Tag `locationNameList:"Tag" min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceTagSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTagSet) GoString() string { + return s.String() +} + +// A complex type that contains information about the health check status for +// the current observation. +type StatusReport struct { + _ struct{} `type:"structure"` + + // The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + CheckedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The observed health check status. + Status *string `type:"string"` +} + +// String returns the string representation +func (s StatusReport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReport) GoString() string { + return s.String() +} + +// A single tag containing a key and value. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a Tag. + Key *string `type:"string"` + + // The value for a Tag. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type TrafficPolicy struct { + _ struct{} `type:"structure"` + + Comment *string `type:"string"` + + Document *string `type:"string" required:"true"` + + Id *string `type:"string" required:"true"` + + Name *string `type:"string" required:"true"` + + Type *string `type:"string" required:"true" enum:"RRType"` + + Version *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrafficPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicy) GoString() string { + return s.String() +} + +type TrafficPolicyInstance struct { + _ struct{} `type:"structure"` + + HostedZoneId *string `type:"string" required:"true"` + + Id *string `type:"string" required:"true"` + + Message *string `type:"string" required:"true"` + + Name *string `type:"string" required:"true"` + + State *string `type:"string" required:"true"` + + TTL *int64 `type:"long" required:"true"` + + TrafficPolicyId *string `type:"string" required:"true"` + + TrafficPolicyType *string `type:"string" required:"true" enum:"RRType"` + + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrafficPolicyInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicyInstance) GoString() string { + return s.String() +} + +type TrafficPolicySummary struct { + _ struct{} `type:"structure"` + + Id *string `type:"string" required:"true"` + + LatestVersion *int64 `min:"1" type:"integer" required:"true"` + + Name *string `type:"string" required:"true"` + + TrafficPolicyCount *int64 `min:"1" type:"integer" required:"true"` + + Type *string `type:"string" required:"true" enum:"RRType"` +} + +// String returns the string representation +func (s TrafficPolicySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicySummary) GoString() string { + return s.String() +} + +// >A complex type that contains information about the request to update a health +// check. +type UpdateHealthCheckInput struct { + _ struct{} `locationName:"UpdateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + // + // Specify this value only if you want to change it. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName + // to the endpoint in the client_hello message during TLS negotiation. If you + // don't specify a value for EnableSNI, Amazon Route 53 defaults to true when + // Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other + // value. + // + // Specify this value only if you want to change it. + EnableSNI *bool `type:"boolean"` + + // The number of consecutive health checks that an endpoint must pass or fail + // for Amazon Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. 
For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + // + // Specify this value only if you want to change it. + FailureThreshold *int64 `min:"1" type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + // + // Specify this value only if you want to change it. + FullyQualifiedDomainName *string `type:"string"` + + // The ID of the health check to update. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + // Optional. When you specify a health check version, Amazon Route 53 compares + // this value with the current value in the health check, which prevents you + // from updating the health check when the versions don't match. Using HealthCheckVersion + // lets you prevent overwriting another change to the health check. + HealthCheckVersion *int64 `min:"1" type:"long"` + + // The minimum number of child health checks that must be healthy for Amazon + // Route 53 to consider the parent health check to be healthy. Valid values + // are integers between 0 and 256, inclusive. + // + // Specify this value only if you want to change it. + HealthThreshold *int64 `type:"integer"` + + // The IP address of the resource that you want to check. + // + // Specify this value only if you want to change it. + IPAddress *string `type:"string"` + + // A boolean value that indicates whether the status of health check should + // be inverted. For example, if a health check is healthy but Inverted is True, + // then Amazon Route 53 considers the health check to be unhealthy. + // + // Specify this value only if you want to change it. + Inverted *bool `type:"boolean"` + + // The port on which you want Amazon Route 53 to open a connection to perform + // health checks. + // + // Specify this value only if you want to change it. + Port *int64 `min:"1" type:"integer"` + + // The path that you want Amazon Route 53 to request when performing health + // checks. The path can be any value for which your endpoint will return an + // HTTP status code of 2xx or 3xx when the endpoint is healthy, for example + // the file /docs/route53-health-check.html. + // + // Specify this value only if you want to change it. + ResourcePath *string `type:"string"` + + // If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string that + // you want Amazon Route 53 to search for in the response body from the specified + // resource. If the string appears in the response body, Amazon Route 53 considers + // the resource healthy. Amazon Route 53 considers case when searching for SearchString + // in the response body. + // + // Specify this value only if you want to change it. + SearchString *string `type:"string"` +} + +// String returns the string representation +func (s UpdateHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHealthCheckInput) GoString() string { + return s.String() +} + +type UpdateHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains identifying information about the health check. 
+ HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to update a hosted +// zone comment. +type UpdateHostedZoneCommentInput struct { + _ struct{} `locationName:"UpdateHostedZoneCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A comment about your hosted zone. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified hosted zone after +// the update. +type UpdateHostedZoneCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contain information about the specified hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to update the comment. +type UpdateTrafficPolicyCommentInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The new comment for the specified traffic policy and version. + Comment *string `type:"string" required:"true"` + + // The value of Id for the traffic policy for which you want to update the comment. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of Version for the traffic policy for which you want to update + // the comment. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the traffic policy. +type UpdateTrafficPolicyCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to update based on a specified traffic policy instance. 
+type UpdateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the traffic policy instance that you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the updated resource + // record sets. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want Amazon Route 53 to use to update + // resource record sets for the specified traffic policy instance. + TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want Amazon Route 53 to use to + // update resource record sets for the specified traffic policy instance. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type UpdateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the updated traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +type VPC struct { + _ struct{} `type:"structure"` + + // A VPC ID + VPCId *string `type:"string"` + + VPCRegion *string `min:"1" type:"string" enum:"VPCRegion"` +} + +// String returns the string representation +func (s VPC) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPC) GoString() string { + return s.String() +} + +const ( + // @enum ChangeAction + ChangeActionCreate = "CREATE" + // @enum ChangeAction + ChangeActionDelete = "DELETE" + // @enum ChangeAction + ChangeActionUpsert = "UPSERT" +) + +const ( + // @enum ChangeStatus + ChangeStatusPending = "PENDING" + // @enum ChangeStatus + ChangeStatusInsync = "INSYNC" +) + +const ( + // @enum HealthCheckType + HealthCheckTypeHttp = "HTTP" + // @enum HealthCheckType + HealthCheckTypeHttps = "HTTPS" + // @enum HealthCheckType + HealthCheckTypeHttpStrMatch = "HTTP_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeHttpsStrMatch = "HTTPS_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeTcp = "TCP" + // @enum HealthCheckType + HealthCheckTypeCalculated = "CALCULATED" +) + +const ( + // @enum RRType + RRTypeSoa = "SOA" + // @enum RRType + RRTypeA = "A" + // @enum RRType + RRTypeTxt = "TXT" + // @enum RRType + RRTypeNs = "NS" + // @enum RRType + RRTypeCname = "CNAME" + // @enum RRType + RRTypeMx = "MX" + // @enum RRType + RRTypePtr = "PTR" + // @enum RRType + RRTypeSrv = "SRV" + // @enum RRType + RRTypeSpf = "SPF" + // @enum RRType + RRTypeAaaa = "AAAA" +) + +const ( + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverPrimary = "PRIMARY" + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverSecondary = "SECONDARY" 
+) + +const ( + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsEast1 = "us-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest1 = "us-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest2 = "us-west-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuWest1 = "eu-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuCentral1 = "eu-central-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast1 = "ap-southeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast2 = "ap-southeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast1 = "ap-northeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast2 = "ap-northeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionSaEast1 = "sa-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionCnNorth1 = "cn-north-1" +) + +const ( + // @enum TagResourceType + TagResourceTypeHealthcheck = "healthcheck" + // @enum TagResourceType + TagResourceTypeHostedzone = "hostedzone" +) + +const ( + // @enum VPCRegion + VPCRegionUsEast1 = "us-east-1" + // @enum VPCRegion + VPCRegionUsWest1 = "us-west-1" + // @enum VPCRegion + VPCRegionUsWest2 = "us-west-2" + // @enum VPCRegion + VPCRegionEuWest1 = "eu-west-1" + // @enum VPCRegion + VPCRegionEuCentral1 = "eu-central-1" + // @enum VPCRegion + VPCRegionApSoutheast1 = "ap-southeast-1" + // @enum VPCRegion + VPCRegionApSoutheast2 = "ap-southeast-2" + // @enum VPCRegion + VPCRegionApNortheast1 = "ap-northeast-1" + // @enum VPCRegion + VPCRegionApNortheast2 = "ap-northeast-2" + // @enum VPCRegion + VPCRegionSaEast1 = "sa-east-1" + // @enum VPCRegion + VPCRegionCnNorth1 = "cn-north-1" +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/customizations.go new file mode 100644 index 0000000..91af196 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/customizations.go @@ -0,0 +1,30 @@ +package route53 + +import ( + "regexp" + + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +func init() { + initClient = func(c *client.Client) { + c.Handlers.Build.PushBack(sanitizeURL) + } + + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opChangeResourceRecordSets: + r.Handlers.UnmarshalError.Remove(restxml.UnmarshalErrorHandler) + r.Handlers.UnmarshalError.PushBack(unmarshalChangeResourceRecordSetsError) + } + } +} + +var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`) + +func sanitizeURL(r *request.Request) { + r.HTTPRequest.URL.Opaque = + reSanitizeURL.ReplaceAllString(r.HTTPRequest.URL.Opaque, "/") +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/service.go new file mode 100644 index 0000000..f4a82bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package route53 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Route53 is a client for Route 53. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Route53 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "route53" + +// New creates a new instance of the Route53 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Route53 client from just a session. +// svc := route53.New(mySession) +// +// // Create a Route53 client with additional configuration +// svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53 { + svc := &Route53{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-04-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Route53 operation and runs any +// custom request initialization. 
+func (c *Route53) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go new file mode 100644 index 0000000..e91375d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go @@ -0,0 +1,77 @@ +package route53 + +import ( + "bytes" + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +type baseXMLErrorResponse struct { + XMLName xml.Name +} + +type standardXMLErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type invalidChangeBatchXMLErrorResponse struct { + XMLName xml.Name `xml:"InvalidChangeBatch"` + Messages []string `xml:"Messages>Message"` +} + +func unmarshalChangeResourceRecordSetsError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + responseBody, err := ioutil.ReadAll(r.HTTPResponse.Body) + + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read Route53 XML error response", err) + return + } + + baseError := &baseXMLErrorResponse{} + + if err := xml.Unmarshal(responseBody, baseError); err != nil { + r.Error = awserr.New("SerializationError", "failed to decode Route53 XML error response", err) + return + } + + switch baseError.XMLName.Local { + case "InvalidChangeBatch": + unmarshalInvalidChangeBatchError(r, responseBody) + default: + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(responseBody)) + restxml.UnmarshalError(r) + } +} + +func unmarshalInvalidChangeBatchError(r *request.Request, requestBody []byte) { + resp := &invalidChangeBatchXMLErrorResponse{} + err := xml.Unmarshal(requestBody, resp) + + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + return + } + + const errorCode = "InvalidChangeBatch" + errors := []error{} + + for _, msg := range resp.Messages { + errors = append(errors, awserr.New(errorCode, msg, nil)) + } + + r.Error = awserr.NewRequestFailure( + awserr.NewBatchError(errorCode, "ChangeBatch errors occured", errors), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/waiters.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/waiters.go new file mode 100644 index 0000000..0478616 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/route53/waiters.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package route53 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Route53) WaitUntilResourceRecordSetsChanged(input *GetChangeInput) error { + waiterCfg := waiter.Config{ + Operation: "GetChange", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "ChangeInfo.Status", + Expected: "INSYNC", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore new file mode 100644 index 0000000..7adca94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/Makefile b/Godeps/_workspace/src/github.com/go-ini/ini/Makefile new file mode 100644 index 0000000..ac034e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README.md b/Godeps/_workspace/src/github.com/go-ini/ini/README.md new file mode 100644 index 0000000..a87cca2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/README.md @@ -0,0 +1,632 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. 
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+	go get gopkg.in/ini.v1
+
+To use the latest changes:
+
+	go get github.com/go-ini/ini
+
+Add the `-u` flag to update the package in the future.
+
+### Testing
+
+If you want to run the tests on your machine, add the `-t` flag:
+
+	go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update the package in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources as you want. Passing any other type simply returns an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files and some of them may not be available at load time, but you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is that whenever such a file becomes available by the time you call the `Reload` method, it will be loaded as usual.
+
+### Working with sections
+
+To get a section:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just pass an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code can make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section does not exist? Don't panic, it automatically creates and returns a new section to you, as the sketch below illustrates.
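+
+As a quick illustration of the difference between the two accessors (the `server` section and `HTTP_PORT` key here are hypothetical names for the sake of the example), `GetSection` reports an error for a missing section, while `Section` creates it on the fly:
+
+```go
+// GetSection returns an error when the section is missing.
+if _, err := cfg.GetSection("server"); err != nil {
+	// the section does not exist yet
+}
+
+// Section never fails: a missing section is created automatically,
+// so chained reads with a default value are always safe.
+port := cfg.Section("server").Key("HTTP_PORT").MustInt(8080)
+```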
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... 
+err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? 
+ +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 0000000..75c1005 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,619 @@ +本包ćŹäľ›äş† Go 语言中读写 INI 文件的功č˝ă€‚ + +## 功č˝ç‰ąć€§ + +- 支ćŚč¦†ç›–加载多个数据ćşďĽ`[]byte` ć–文件) +- 支ćŚé€’归读取键值 +- 支ćŚčŻ»ĺŹ–ç¶ĺ­ĺ†ĺŚş +- 支ćŚčŻ»ĺŹ–č‡Şĺ˘žé”®ĺŤ +- 支ćŚčŻ»ĺŹ–多行的键值 +- 支ćŚĺ¤§é‡Źčľ…助方法 +- 支ćŚĺś¨čŻ»ĺŹ–时直接转换为 Go 语言类型 +- 支ćŚčŻ»ĺŹ–ĺ’Ś **写入** ĺ†ĺŚşĺ’Śé”®çš„注释 +- 轻松操作ĺ†ĺŚşă€é”®ĺ€Ľĺ’Śćł¨é‡Š +- 在保ĺ­ć–‡ä»¶ć—¶ĺ†ĺŚşĺ’Śé”®ĺ€ĽäĽšäżťćŚĺŽźćś‰çš„顺序 + +## 下载安装 + +使用一个特定ç‰ćś¬ďĽš + + go get gopkg.in/ini.v1 + +使用最新ç‰ďĽš + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您ćłč¦ĺś¨č‡Şĺ·±çš„机器上čżčˇŚćµ‹čŻ•ďĽŚčŻ·ä˝żç”¨ `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据ćşĺŠ č˝˝ + +一个 **数据ćş** ĺŹŻä»ĄćŻ `[]byte` ç±»ĺž‹çš„ĺŽźĺ§‹ć•°ćŤ®ďĽŚć– `string` 类型的文件路径。您可以加载 **任意多个** 数据ćşă€‚如果您传递其ĺ®ç±»ĺž‹çš„数据ćşďĽŚĺ™äĽšç›´ćŽĄčż”回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +ć–者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需č¦ĺŠ č˝˝ĺ“Şäş›ć•°ćŤ®ćşć—¶ďĽŚä»ŤĺŹŻä»Ąä˝żç”¨ **Append()** 在需č¦çš„时候加载ĺ®ä»¬ă€‚ + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您ćłč¦ĺŠ č˝˝ä¸€çł»ĺ—文件,但ćŻä¸Ťč˝ĺ¤źçˇ®ĺ®šĺ…¶ä¸­ĺ“Şäş›ć–‡ä»¶ćŻä¸Ťĺ­ĺś¨çš„,可以通过č°ç”¨ĺ‡˝ć•° `LooseLoad` 来忽略ĺ®ä»¬ďĽ`Load` 会因为文件不ĺ­ĺś¨č€Śčż”回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的ćŻďĽŚĺ˝“那些之前不ĺ­ĺś¨çš„文件在重新č°ç”¨ `Reload` 方法的时候çŞç„¶ĺ‡şçŽ°äş†ďĽŚé‚Łäąĺ®ä»¬äĽšč˘«ć­Łĺ¸¸ĺŠ č˝˝ă€‚ + +### 操作ĺ†ĺŚşďĽSection) + +获取指定ĺ†ĺŚşďĽš + +```go +section, err := cfg.GetSection("section name") +``` + +如果您ćłč¦čŽ·ĺŹ–é»č®¤ĺ†ĺŚşďĽŚĺ™ĺŹŻä»Ąç”¨ç©şĺ­—符串代替ĺ†ĺŚşĺŤďĽš + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定ćźä¸Şĺ†ĺŚşćŻĺ­ĺś¨çš„,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小ĺżĺ¤ć–­é”™äş†ďĽŚč¦čŽ·ĺŹ–çš„ĺ†ĺŚşĺ…¶ĺ®žćŻä¸Ťĺ­ĺś¨çš„,那会发生什äąĺ‘˘ďĽźć˛ˇäş‹çš„,ĺ®äĽšč‡ŞĺŠ¨ĺ›ĺ»şĺą¶čż”回一个对应的ĺ†ĺŚşĺŻąč±ˇç»™ć‚¨ă€‚ + +ĺ›ĺ»şä¸€ä¸Şĺ†ĺŚşďĽš + +```go +err := cfg.NewSection("new section") +``` + +获取所有ĺ†ĺŚşĺŻąč±ˇć–ĺŤç§°ďĽš + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键ďĽKey) + +获取ćźä¸Şĺ†ĺŚşä¸‹çš„键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +ĺ’Śĺ†ĺŚşä¸€ć ·ďĽŚć‚¨äąźĺŹŻä»Ąç›´ćŽĄčŽ·ĺŹ–键而忽略错误处ç†ďĽš + +```go +key := cfg.Section("").Key("key name") +``` + +ĺ¤ć–­ćźä¸Şé”®ćŻĺ¦ĺ­ĺś¨ďĽš + +```go +yes := cfg.Section("").HasKey("key name") +``` + +ĺ›ĺ»şä¸€ä¸Şć–°çš„键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取ĺ†ĺŚşä¸‹çš„所有键ć–é”®ĺŤďĽš + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取ĺ†ĺŚşä¸‹çš„所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值ďĽValue) + +获取一个类型为字符串ďĽstring)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的ĺŚć—¶é€ščż‡č‡Şĺ®šäą‰ĺ‡˝ć•°čż›čˇŚĺ¤„ç†éŞŚčŻďĽš + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需č¦ä»»ä˝•ĺŻąĺ€Ľçš„自动转ĺŹĺŠźč˝ďĽäľ‹ĺ¦‚递归读取),可以直接获取原值ďĽčż™ç§Ťć–ąĺĽŹć€§č˝ćś€ä˝łďĽ‰ďĽš + +```go +val := cfg.Section("").Key("key name").Value() +``` + +ĺ¤ć–­ćźä¸ŞĺŽźĺ€ĽćŻĺ¦ĺ­ĺś¨ďĽš + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其ĺ®ç±»ĺž‹çš„值: + +```go +// ĺ¸ĺ°”值的规ĺ™ďĽš +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = 
cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// ç”± Must 开头的方法ĺŤĺ…许接收一个相ĺŚç±»ĺž‹çš„参数来作为é»č®¤ĺ€ĽďĽŚ +// 当键不ĺ­ĺś¨ć–者转换失败时,ĺ™äĽšç›´ćŽĄčż”回该é»č®¤ĺ€Ľă€‚ +// 但ćŻďĽŚMustString 方法必须传递一个é»č®¤ĺ€Ľă€‚ + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果ć‘的值有好多行怎äąĺŠžďĽź + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 caseďĽ + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞ç†äş†ďĽé‚Łč¦ćŻć‘属于一行的内容写不下ćłč¦ĺ†™ĺ°ç¬¬äşŚčˇŚć€ŽäąĺŠžďĽź + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直ćŻĺ°ŹčŹśä¸€ç˘źďĽ + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? 
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需č¦ćł¨ć„Źçš„ćŻďĽŚĺ€Ľä¸¤äľ§çš„单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就ćŻĺ…¨é¨äş†ďĽźĺ“ĺ“,当然不ćŻă€‚ + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取ĺ°çš„值不ćŻĺ€™é€‰ĺ€Ľçš„任意一个,ĺ™äĽščż”回é»č®¤ĺ€ĽďĽŚč€Śé»č®¤ĺ€Ľä¸Ťéś€č¦ćŻĺ€™é€‰ĺ€Ľä¸­çš„一ĺ‘。 + +验čŻčŽ·ĺŹ–的值ćŻĺ¦ĺś¨ćŚ‡ĺ®ščŚĺ›´ĺ†…: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动ĺ†ĺ‰˛é”®ĺ€Ľĺ°ĺ‡ç‰‡ďĽslice) + +当ĺ­ĺś¨ć— ć•čľ“入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果ĺ‡ç‰‡ä¸­ĺ‰”除无ć•čľ“入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当ĺ­ĺś¨ć— ć•čľ“入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### äżťĺ­é…Ťç˝® + +ç»äşŽĺ°äş†čż™ä¸Şć—¶ĺ»ďĽŚćŻć—¶ĺ€™äżťĺ­ä¸€ä¸‹é…Ťç˝®äş†ă€‚ + +比čľĺŽźĺ§‹çš„ĺšćł•ćŻčľ“出配置ĺ°ćźä¸Şć–‡ä»¶ďĽš + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比čľé«çş§çš„ĺšćł•ćŻĺ†™ĺ…Ąĺ°ä»»ä˝•ĺ®žçŽ° `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### é«çş§ç”¨ćł• + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以ćŻç›¸ĺŚĺ†ĺŚşć–者é»č®¤ĺ†ĺŚşä¸‹çš„é”®ĺŤă€‚字符串 `%()s` 会被相应的键值所替代,如果指定的键不ĺ­ĺś¨ďĽŚĺ™äĽšç”¨ç©şĺ­—符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取ç¶ĺ­ĺ†ĺŚş + +您可以在ĺ†ĺŚşĺŤç§°ä¸­ä˝żç”¨ `.` 来表示两个ć–多个ĺ†ĺŚşäą‹é—´çš„ç¶ĺ­ĺ…łçł»ă€‚如果ćźä¸Şé”®ĺś¨ĺ­ĺ†ĺŚşä¸­ä¸Ťĺ­ĺś¨ďĽŚĺ™äĽšĺŽ»ĺ®çš„ç¶ĺ†ĺŚşä¸­ĺ†Ťć¬ˇĺŻ»ć‰ľďĽŚç›´ĺ°ć˛ˇćś‰ç¶ĺ†ĺŚşä¸şć­˘ă€‚ + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### čŻ»ĺŹ–č‡Şĺ˘žé”®ĺŤ + +如果数据ćşä¸­çš„é”®ĺŤä¸ş `-`,ĺ™č®¤ä¸şčŻĄé”®ä˝żç”¨äş†č‡Şĺ˘žé”®ĺŤçš„特殊语法。计数器从 1 开始,并且ĺ†ĺŚşäą‹é—´ćŻç›¸äş’独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### ć ĺ°„ĺ°ç»“ćž„ + +ćłč¦ä˝żç”¨ć›´ĺŠ éť˘ĺ‘对象的方式玩转 INI ĺ—?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一ĺ‡ç«źĺŹŻä»Ąĺ¦‚此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需č¦ć ĺ°„一个ĺ†ĺŚşĺ—? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎äąč®ľç˝®é»č®¤ĺ€Ľĺ‘˘ďĽźĺľç®€ĺŤ•ďĽŚĺŹŞč¦ĺś¨ć ĺ°„之前对指定字段进行赋值就可以了。如果键未找ĺ°ć–者类型错误,该值不会发生改ĺŹă€‚ + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊ďĽç„¶č€ŚďĽŚĺ¦‚果不č˝čżç»™ć‘原来的配置文件,有什äąĺŤµç”¨ďĽź + +### 从结构反射 + +可ćŻďĽŚć‘有说不č˝ĺ—? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### ĺŤç§°ć ĺ°„器ďĽName Mapper) + +为了节çść‚¨çš„时间并简化代ç ďĽŚćś¬ĺş“支ćŚç±»ĺž‹ä¸ş [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) çš„ĺŤç§°ć ĺ°„器,该ć ĺ°„器负责结构字段ĺŤä¸Žĺ†ĺŚşĺŤĺ’Śé”®ĺŤäą‹é—´çš„ć ĺ°„。 + +目前有 2 款内置的ć ĺ°„器: + +- `AllCapsUnderscore`:该ć ĺ°„器将字段ĺŤč˝¬ćŤ˘č‡łć ĽĺĽŹ `ALL_CAPS_UNDERSCORE` ĺŽĺ†ŤĺŽ»ĺŚąé…Ťĺ†ĺŚşĺŤĺ’Śé”®ĺŤă€‚ +- `TitleUnderscore`:该ć ĺ°„器将字段ĺŤč˝¬ćŤ˘č‡łć ĽĺĽŹ `title_underscore` ĺŽĺ†ŤĺŽ»ĺŚąé…Ťĺ†ĺŚşĺŤĺ’Śé”®ĺŤă€‚ + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相ĺŚçš„规ĺ™ă€‚ + +#### ć ĺ°„/反射的其ĺ®čŻ´ćŽ + +任何嵌入的结构é˝äĽšč˘«é»č®¤č®¤ä˝śä¸€ä¸Şä¸ŤĺŚçš„ĺ†ĺŚşďĽŚĺą¶ä¸”不会自动产生所谓的ç¶ĺ­ĺ†ĺŚşĺ…łč”: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +ĺľĺĄ˝ďĽŚä˝†ćŻďĽŚć‘ĺ°±ćŻč¦ĺµŚĺ…Ąç»“构也在ĺŚä¸€ä¸Şĺ†ĺŚşă€‚好ĺ§ďĽŚä˝ çąćŻćťŽĺšďĽ + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [ĺ›ĺ»şĺ·ĄĺŤ•](https://github.com/go-ini/ini/issues/new) + +## 常č§é—®é˘ + +### 字段 `BlockMode` ćŻä»€äąďĽź + +é»č®¤ć…况下,本库会在您进行读写操作时采用é”ćśşĺ¶ćťĄçˇ®äżťć•°ćŤ®ć—¶é—´ă€‚但在ćźäş›ć…况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作ćŹĺŤ‡ĺ¤§çş¦ **50-70%** 的性č˝ă€‚ + +### 为什äąč¦ĺ†™ĺŹ¦ä¸€ä¸Ş INI 解ćžĺş“? + +许多人é˝ĺś¨ä˝żç”¨ć‘çš„ [goconfig](https://github.com/Unknwon/goconfig) 来完ć对 INI 文件的操作,但ć‘希望使用更加 Go 风格的代ç ă€‚并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性č˝ćŹĺŤ‡ă€‚ + +为了ĺšĺ‡şčż™äş›ć”ąĺŹďĽŚć‘必须对 API 进行破坏,所以新开一个仓库ćŻćś€ĺ®‰ĺ…¨çš„ĺšćł•ă€‚除此之外,本库直接使用 `gopkg.in` 来进行ç‰ćś¬ĺŚ–发ĺ¸ă€‚ďĽĺ…¶ĺ®žçśźç›¸ćŻĺŻĽĺ…Ąč·Żĺľ„更短了) diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go new file mode 100644 index 0000000..f186148 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go @@ -0,0 +1,465 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. 
+ // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.11.0" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + // Whether the parser should ignore nonexistent files or return error. + looseMode bool + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, looseMode bool) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + looseMode: looseMode, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +func loadSources(looseMode bool, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, looseMode) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. 
+// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return loadSources(false, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return loadSources(true, source, others...) +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.looseMode { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. 
+func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + // Count and generate alignment length and buffer spaces + alignLength := 0 + if PrettyFormat { + for i := 0; i < len(sec.keyList); i++ { + if len(sec.keyList[i]) > alignLength { + alignLength = len(sec.keyList[i]) + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + val := key.value + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." 
+ strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/key.go b/Godeps/_workspace/src/github.com/go-ini/ini/key.go new file mode 100644 index 0000000..7cbccd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/key.go @@ -0,0 +1,616 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. 
+func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) 
+} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. 
+func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. 
+func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/parser.go b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000..1c1bf91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go @@ -0,0 +1,312 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines + if line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.IndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + section, err = f.NewSection(string(line[1:closeIdx])) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncr = isAutoIncr + + value, err := p.readValue(line[offset:]) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/section.go b/Godeps/_workspace/src/github.com/go-ini/ini/section.go new file mode 100644 index 0000000..ed8cbdb --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/section.go @@ -0,0 +1,177 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. 
+func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct.go b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go new file mode 100644 index 0000000..3fb92c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go @@ -0,0 +1,351 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. 
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. 
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 0000000..531fcc1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 0000000..1f98077 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 0000000..b03310a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 0000000..a828d28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 0000000..187ef67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info.
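For reviewers orienting themselves in the go-ini package vendored above, here is a minimal usage sketch tying together the typed list getters from key.go and the struct mapping from struct.go. The import path, config content, and Config struct are illustrative assumptions, not something this change adds:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini" // assumed canonical import path of the vendored package
)

// Config is a hypothetical struct used only to illustrate MapTo.
type Config struct {
	Name  string   `ini:"name"`
	Hosts []string `ini:"hosts" delim:","`
}

func main() {
	cfg, err := ini.Load([]byte("name = demo\nhosts = a.example.com, b.example.com\nports = 8080, 9090"))
	if err != nil {
		panic(err)
	}

	// Typed list getter from key.go: invalid entries become zero values,
	// while StrictInts would return an error instead.
	fmt.Println(cfg.Section("").Key("ports").Ints(",")) // [8080 9090]

	// Struct mapping from struct.go; note this version only maps string and
	// time.Time slices, so Hosts is kept as []string here.
	var c Config
	if err := cfg.MapTo(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:demo Hosts:[a.example.com b.example.com]}
}
```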
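Since the go-jmespath README above is terse, a small sketch of the Compile/Search API defined in api.go just below may also help; the JSON document and expressions are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath" // assumed canonical import path of the vendored package
)

func main() {
	// The interpreter works on the generic types produced by encoding/json:
	// map[string]interface{}, []interface{}, float64, string, bool, nil.
	var data interface{}
	if err := json.Unmarshal([]byte(`{"foo": {"bar": [1, 2, 3]}}`), &data); err != nil {
		panic(err)
	}

	// One-shot evaluation.
	result, err := jmespath.Search("foo.bar[1]", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 2

	// Compile once and reuse; MustCompile panics on a syntax error, so it is
	// meant for package-level expressions.
	expr := jmespath.MustCompile("foo.bar | length(@)")
	out, _ := expr.Search(data)
	fmt.Println(out) // 3
}
```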
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 0000000..9cfa988 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 0000000..1cd2d23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 0000000..9b7cd89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = 
"string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: 
true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, 
errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000..13c7460 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000..817900c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. 
+type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 0000000..1240a17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
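A small round-trip sketch of how an expression becomes a printable AST, using only exported pieces from these files (NewParser, the Parse method defined just below, ASTNode.PrettyPrint, and the lexer's SyntaxError); the expression is illustrative:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parsed, err := jmespath.NewParser().Parse("foo.bar[0]")
	if err != nil {
		// Lexing and parsing failures come back as SyntaxError values;
		// HighlightLocation marks the offending offset with a caret.
		if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
			fmt.Println(syntaxErr.HighlightLocation())
		}
		return
	}
	// Dump the AST; useful only for debugging, since the node layout
	// is documented as an implementation detail that may change.
	fmt.Println(parsed.PrettyPrint(0))
}
```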
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000..dae79cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000..ddc1b7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go deleted file mode 100644 index c0654f5..0000000 --- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "time" -) - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other goamz packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. -func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go deleted file mode 100644 index cfc42c0..0000000 --- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go +++ /dev/null @@ -1,445 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// -// Written by Gustavo Niemeyer -// -package aws - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/vaughan0/go-ini" -) - -// Region defines the URLs where AWS services may be accessed. -// -// See http://goo.gl/d8BP1 for more details. -type Region struct { - Name string // the canonical name of this region. 
- EC2Endpoint string - S3Endpoint string - S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. - S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. - S3LowercaseBucket bool // true if the region requires bucket names to be lower case. - SDBEndpoint string - SNSEndpoint string - SQSEndpoint string - IAMEndpoint string - ELBEndpoint string - AutoScalingEndpoint string - RdsEndpoint string - Route53Endpoint string -} - -var USGovWest = Region{ - "us-gov-west-1", - "https://ec2.us-gov-west-1.amazonaws.com", - "https://s3-fips-us-gov-west-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.us-gov-west-1.amazonaws.com", - "https://sqs.us-gov-west-1.amazonaws.com", - "https://iam.us-gov.amazonaws.com", - "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", - "https://autoscaling.us-gov-west-1.amazonaws.com", - "https://rds.us-gov-west-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var USEast = Region{ - "us-east-1", - "https://ec2.us-east-1.amazonaws.com", - "https://s3.amazonaws.com", - "", - false, - false, - "https://sdb.amazonaws.com", - "https://sns.us-east-1.amazonaws.com", - "https://sqs.us-east-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-east-1.amazonaws.com", - "https://autoscaling.us-east-1.amazonaws.com", - "https://rds.us-east-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var USWest = Region{ - "us-west-1", - "https://ec2.us-west-1.amazonaws.com", - "https://s3-us-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-1.amazonaws.com", - "https://sns.us-west-1.amazonaws.com", - "https://sqs.us-west-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-1.amazonaws.com", - "https://autoscaling.us-west-1.amazonaws.com", - "https://rds.us-west-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var USWest2 = Region{ - "us-west-2", - "https://ec2.us-west-2.amazonaws.com", - "https://s3-us-west-2.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-2.amazonaws.com", - "https://sns.us-west-2.amazonaws.com", - "https://sqs.us-west-2.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-2.amazonaws.com", - "https://autoscaling.us-west-2.amazonaws.com", - "https://rds.us-west-2.amazonaws.com", - "https://route53.amazonaws.com", -} - -var EUWest = Region{ - "eu-west-1", - "https://ec2.eu-west-1.amazonaws.com", - "https://s3-eu-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-west-1.amazonaws.com", - "https://sns.eu-west-1.amazonaws.com", - "https://sqs.eu-west-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-west-1.amazonaws.com", - "https://autoscaling.eu-west-1.amazonaws.com", - "https://rds.eu-west-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var EUCentral = Region{ - "eu-central-1", - "https://ec2.eu-central-1.amazonaws.com", - "https://s3-eu-central-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.eu-central-1.amazonaws.com", - "https://sqs.eu-central-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-central-1.amazonaws.com", - "https://autoscaling.eu-central-1.amazonaws.com", - "https://rds.eu-central-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var APSoutheast = Region{ - "ap-southeast-1", - "https://ec2.ap-southeast-1.amazonaws.com", - "https://s3-ap-southeast-1.amazonaws.com", - "", - true, - true, - 
"https://sdb.ap-southeast-1.amazonaws.com", - "https://sns.ap-southeast-1.amazonaws.com", - "https://sqs.ap-southeast-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", - "https://autoscaling.ap-southeast-1.amazonaws.com", - "https://rds.ap-southeast-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var APSoutheast2 = Region{ - "ap-southeast-2", - "https://ec2.ap-southeast-2.amazonaws.com", - "https://s3-ap-southeast-2.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-2.amazonaws.com", - "https://sns.ap-southeast-2.amazonaws.com", - "https://sqs.ap-southeast-2.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", - "https://autoscaling.ap-southeast-2.amazonaws.com", - "https://rds.ap-southeast-2.amazonaws.com", - "https://route53.amazonaws.com", -} - -var APNortheast = Region{ - "ap-northeast-1", - "https://ec2.ap-northeast-1.amazonaws.com", - "https://s3-ap-northeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-northeast-1.amazonaws.com", - "https://sns.ap-northeast-1.amazonaws.com", - "https://sqs.ap-northeast-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", - "https://autoscaling.ap-northeast-1.amazonaws.com", - "https://rds.ap-northeast-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var SAEast = Region{ - "sa-east-1", - "https://ec2.sa-east-1.amazonaws.com", - "https://s3-sa-east-1.amazonaws.com", - "", - true, - true, - "https://sdb.sa-east-1.amazonaws.com", - "https://sns.sa-east-1.amazonaws.com", - "https://sqs.sa-east-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.sa-east-1.amazonaws.com", - "https://autoscaling.sa-east-1.amazonaws.com", - "https://rds.sa-east-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var CNNorth = Region{ - "cn-north-1", - "https://ec2.cn-north-1.amazonaws.com.cn", - "https://s3.cn-north-1.amazonaws.com.cn", - "", - true, - true, - "", - "https://sns.cn-north-1.amazonaws.com.cn", - "https://sqs.cn-north-1.amazonaws.com.cn", - "https://iam.cn-north-1.amazonaws.com.cn", - "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", - "https://autoscaling.cn-north-1.amazonaws.com.cn", - "https://rds.cn-north-1.amazonaws.com.cn", - "https://route53.amazonaws.com", -} - -var Regions = map[string]Region{ - APNortheast.Name: APNortheast, - APSoutheast.Name: APSoutheast, - APSoutheast2.Name: APSoutheast2, - EUWest.Name: EUWest, - EUCentral.Name: EUCentral, - USEast.Name: USEast, - USWest.Name: USWest, - USWest2.Name: USWest2, - SAEast.Name: SAEast, - USGovWest.Name: USGovWest, - CNNorth.Name: CNNorth, -} - -type Auth struct { - AccessKey, SecretKey, Token string -} - -var unreserved = make([]bool, 128) -var hex = "0123456789ABCDEF" - -func init() { - // RFC3986 - u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" - for _, c := range u { - unreserved[c] = true - } -} - -type credentials struct { - Code string - LastUpdated string - Type string - AccessKeyId string - SecretAccessKey string - Token string - Expiration string -} - -// GetMetaData retrieves instance metadata about the current machine. -// -// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. 
-func GetMetaData(path string) (contents []byte, err error) { - url := "http://169.254.169.254/latest/meta-data/" + path - - resp, err := RetryingClient.Get(url) - if err != nil { - return - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) - return - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - return []byte(body), err -} - -func getInstanceCredentials() (cred credentials, err error) { - credentialPath := "iam/security-credentials/" - - // Get the instance role - role, err := GetMetaData(credentialPath) - if err != nil { - return - } - - // Get the instance role credentials - credentialJSON, err := GetMetaData(credentialPath + string(role)) - if err != nil { - return - } - - err = json.Unmarshal([]byte(credentialJSON), &cred) - return -} - -// GetAuth creates an Auth based on either passed in credentials, -// environment information or instance based role credentials. -func GetAuth(accessKey string, secretKey string) (auth Auth, err error) { - // First try passed in credentials - if accessKey != "" && secretKey != "" { - return Auth{accessKey, secretKey, ""}, nil - } - - // Next try to get auth from the environment - auth, err = SharedAuth() - if err == nil { - // Found auth, return - return - } - - // Next try to get auth from the environment - auth, err = EnvAuth() - if err == nil { - // Found auth, return - return - } - - // Next try getting auth from the instance role - cred, err := getInstanceCredentials() - if err == nil { - // Found auth, return - auth.AccessKey = cred.AccessKeyId - auth.SecretKey = cred.SecretAccessKey - auth.Token = cred.Token - return - } - err = errors.New("No valid AWS authentication found") - return -} - -// SharedAuth creates an Auth based on shared credentials stored in -// $HOME/.aws/credentials. The AWS_PROFILE environment variables is used to -// select the profile. -func SharedAuth() (auth Auth, err error) { - var profileName = os.Getenv("AWS_PROFILE") - - if profileName == "" { - profileName = "default" - } - - var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE") - if credentialsFile == "" { - var homeDir = os.Getenv("HOME") - if homeDir == "" { - err = errors.New("Could not get HOME") - return - } - credentialsFile = homeDir + "/.aws/credentials" - } - - file, err := ini.LoadFile(credentialsFile) - if err != nil { - err = errors.New("Couldn't parse AWS credentials file") - return - } - - var profile = file[profileName] - if profile == nil { - err = errors.New("Couldn't find profile in AWS credentials file") - return - } - - auth.AccessKey = profile["aws_access_key_id"] - auth.SecretKey = profile["aws_secret_access_key"] - - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file") - } - return -} - -// EnvAuth creates an Auth based on environment information. -// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment -// For accounts that require a security token, it is read from AWS_SECURITY_TOKEN -// variables are used. 
-func EnvAuth() (auth Auth, err error) { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") - if auth.AccessKey == "" { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") - } - - auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") - if auth.SecretKey == "" { - auth.SecretKey = os.Getenv("AWS_SECRET_KEY") - } - - auth.Token = os.Getenv("AWS_SECURITY_TOKEN") - - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") - } - return -} - -// Encode takes a string and URI-encodes it in a way suitable -// to be used in AWS signatures. -func Encode(s string) string { - encode := false - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - encode = true - break - } - } - if !encode { - return s - } - e := make([]byte, len(s)*3) - ei := 0 - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - e[ei] = '%' - e[ei+1] = hex[c>>4] - e[ei+2] = hex[c&0xF] - ei += 3 - } else { - e[ei] = c - ei += 1 - } - } - return string(e[:ei]) -} diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go deleted file mode 100644 index ee53238..0000000 --- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go +++ /dev/null @@ -1,125 +0,0 @@ -package aws - -import ( - "math" - "net" - "net/http" - "time" -) - -type RetryableFunc func(*http.Request, *http.Response, error) bool -type WaitFunc func(try int) -type DeadlineFunc func() time.Time - -type ResilientTransport struct { - // Timeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // The default is no timeout. - // - // With or without a timeout, the operating system may impose - // its own earlier timeout. For instance, TCP timeouts are - // often around 3 minutes. - DialTimeout time.Duration - - // MaxTries, if non-zero, specifies the number of times we will retry on - // failure. Retries are only attempted for temporary network errors or known - // safe failures. - MaxTries int - Deadline DeadlineFunc - ShouldRetry RetryableFunc - Wait WaitFunc - transport *http.Transport -} - -// Convenience method for creating an http client -func NewClient(rt *ResilientTransport) *http.Client { - rt.transport = &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - c, err := net.DialTimeout(netw, addr, rt.DialTimeout) - if err != nil { - return nil, err - } - c.SetDeadline(rt.Deadline()) - return c, nil - }, - DisableKeepAlives: true, - Proxy: http.ProxyFromEnvironment, - } - // TODO: Would be nice is ResilientTransport allowed clients to initialize - // with http.Transport attributes. - return &http.Client{ - Transport: rt, - } -} - -var retryingTransport = &ResilientTransport{ - Deadline: func() time.Time { - return time.Now().Add(5 * time.Second) - }, - DialTimeout: 10 * time.Second, - MaxTries: 3, - ShouldRetry: awsRetry, - Wait: ExpBackoff, -} - -// Exported default client -var RetryingClient = NewClient(retryingTransport) - -func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.tries(req) -} - -// Retry a request a maximum of t.MaxTries times. -// We'll only retry if the proper criteria are met. -// If a wait function is specified, wait that amount of time -// In between requests. 
-func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { - for try := 0; try < t.MaxTries; try += 1 { - res, err = t.transport.RoundTrip(req) - - if !t.ShouldRetry(req, res, err) { - break - } - if res != nil { - res.Body.Close() - } - if t.Wait != nil { - t.Wait(try) - } - } - - return -} - -func ExpBackoff(try int) { - time.Sleep(100 * time.Millisecond * - time.Duration(math.Exp2(float64(try)))) -} - -func LinearBackoff(try int) { - time.Sleep(time.Duration(try*100) * time.Millisecond) -} - -// Decide if we should retry a request. -// In general, the criteria for retrying a request is described here -// http://docs.aws.amazon.com/general/latest/gr/api-retries.html -func awsRetry(req *http.Request, res *http.Response, err error) bool { - retry := false - - // Retry if there's a temporary network error. - if neterr, ok := err.(net.Error); ok { - if neterr.Temporary() { - retry = true - } - } - - // Retry if we get a 5xx series error. - if res != nil { - if res.StatusCode >= 500 && res.StatusCode < 600 { - retry = true - } - } - - return retry -} diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/route53/route53.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/route53/route53.go deleted file mode 100644 index 92d623f..0000000 --- a/Godeps/_workspace/src/github.com/mitchellh/goamz/route53/route53.go +++ /dev/null @@ -1,377 +0,0 @@ -// The route53 package provides types and functions for interaction with the AWS -// Route53 service -package route53 - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" - - "github.com/mitchellh/goamz/aws" -) - -// The Route53 type encapsulates operations operations with the route53 endpoint. -type Route53 struct { - aws.Auth - aws.Region - httpClient *http.Client -} - -const APIVersion = "2013-04-01" - -// New creates a new ELB instance. -func New(auth aws.Auth, region aws.Region) *Route53 { - return NewWithClient(auth, region, aws.RetryingClient) -} - -func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *Route53 { - return &Route53{auth, region, httpClient} -} - -type CreateHostedZoneRequest struct { - Name string `xml:"Name"` - CallerReference string `xml:"CallerReference"` - Comment string `xml:"HostedZoneConfig>Comment"` -} - -type CreateHostedZoneResponse struct { - HostedZone HostedZone `xml:"HostedZone"` - ChangeInfo ChangeInfo `xml:"ChangeInfo"` - DelegationSet DelegationSet `xml:"DelegationSet"` -} - -type HostedZone struct { - ID string `xml:"Id"` - Name string `xml:"Name"` - CallerReference string `xml:"CallerReference"` - Comment string `xml:"Config>Comment"` - ResourceCount int `xml:"ResourceRecordSetCount"` -} - -type ChangeInfo struct { - ID string `xml:"Id"` - Status string `xml:"Status"` - SubmittedAt string `xml:"SubmittedAt"` -} - -type DelegationSet struct { - NameServers []string `xml:"NameServers>NameServer"` -} - -func (r *Route53) query(method, path string, req, resp interface{}) error { - params := make(map[string]string) - endpoint, err := url.Parse(r.Region.Route53Endpoint) - if err != nil { - return err - } - endpoint.Path = path - sign(r.Auth, endpoint.Path, params) - - // If they look like url.Values, just encode... 
- if queryArgs, ok := req.(url.Values); ok { - endpoint.RawQuery = queryArgs.Encode() - req = nil - } - - // Encode the body - var body io.ReadWriter - if req != nil { - bodyBuf := bytes.NewBuffer(nil) - enc := xml.NewEncoder(bodyBuf) - start := xml.StartElement{ - Name: xml.Name{ - Space: "", - Local: reflect.Indirect(reflect.ValueOf(req)).Type().Name(), - }, - Attr: []xml.Attr{{xml.Name{"", "xmlns"}, "https://route53.amazonaws.com/doc/2013-04-01/"}}, - } - if err := enc.EncodeElement(req, start); err != nil { - return err - } - - // This is really a sadness, but can't think of a better way to - // do this for now in Go's constructs. - replace := "" - if strings.Contains(bodyBuf.String(), replace) { - var newBuf bytes.Buffer - newBuf.WriteString(strings.Replace(bodyBuf.String(), replace, "", -1)) - bodyBuf = &newBuf - } - - // http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html - if reflect.Indirect(reflect.ValueOf(req)).Type().Name() == "ChangeResourceRecordSetsRequest" { - for _, change := range req.(ChangeResourceRecordSetsRequest).Changes { - if change.Record.AliasTarget != nil { - replace := change.Record.Type + "0" - var newBuf bytes.Buffer - newBuf.WriteString(strings.Replace(bodyBuf.String(), replace, change.Record.Type+"", -1)) - bodyBuf = &newBuf - } - } - } - - body = bodyBuf - } - - // Make the http request - hReq, err := http.NewRequest(method, endpoint.String(), body) - if err != nil { - return err - } - for k, v := range params { - hReq.Header.Set(k, v) - } - re, err := r.httpClient.Do(hReq) - if err != nil { - return err - } - defer re.Body.Close() - - // Check the status code - switch re.StatusCode { - case 200: - case 201: - default: - var body bytes.Buffer - io.Copy(&body, re.Body) - return fmt.Errorf("Request failed, got status code: %d. 
Response: %s", - re.StatusCode, body.Bytes()) - } - - // Decode the response - decoder := xml.NewDecoder(re.Body) - return decoder.Decode(resp) -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -// CreateHostedZone is used to create a new hosted zone -func (r *Route53) CreateHostedZone(req *CreateHostedZoneRequest) (*CreateHostedZoneResponse, error) { - // Generate a unique caller reference if none provided - if req.CallerReference == "" { - req.CallerReference = time.Now().Format(time.RFC3339Nano) - } - out := &CreateHostedZoneResponse{} - if err := r.query("POST", fmt.Sprintf("/%s/hostedzone", APIVersion), req, out); err != nil { - return nil, err - } - return out, nil -} - -type DeleteHostedZoneResponse struct { - ChangeInfo ChangeInfo `xml:"ChangeInfo"` -} - -func (r *Route53) DeleteHostedZone(ID string) (*DeleteHostedZoneResponse, error) { - // Remove the hostedzone prefix if given - ID = CleanZoneID(ID) - out := &DeleteHostedZoneResponse{} - err := r.query("DELETE", fmt.Sprintf("/%s/hostedzone/%s", APIVersion, ID), nil, out) - if err != nil { - return nil, err - } - return out, err -} - -// CleanZoneID is used to remove the leading /hostedzone/ -func CleanZoneID(ID string) string { - if strings.HasPrefix(ID, "/hostedzone/") { - ID = strings.TrimPrefix(ID, "/hostedzone/") - } - return ID -} - -// CleanChangeID is used to remove the leading /change/ -func CleanChangeID(ID string) string { - if strings.HasPrefix(ID, "/change/") { - ID = strings.TrimPrefix(ID, "/change/") - } - return ID -} - -type GetHostedZoneResponse struct { - HostedZone HostedZone `xml:"HostedZone"` - DelegationSet DelegationSet `xml:"DelegationSet"` -} - -func (r *Route53) GetHostedZone(ID string) (*GetHostedZoneResponse, error) { - // Remove the hostedzone prefix if given - ID = CleanZoneID(ID) - out := &GetHostedZoneResponse{} - err := r.query("GET", fmt.Sprintf("/%s/hostedzone/%s", APIVersion, ID), nil, out) - if err != nil { - return nil, err - } - return out, err -} - -type ListHostedZonesResponse struct { - HostedZones []HostedZone `xml:"HostedZones>HostedZone"` - Marker string `xml:"Marker"` - IsTruncated bool `xml:"IsTruncated"` - NextMarker string `xml:"NextMarker"` - MaxItems int `xml:"MaxItems"` -} - -func (r *Route53) ListHostedZones(marker string, maxItems int) (*ListHostedZonesResponse, error) { - values := url.Values{} - - if marker != "" { - values.Add("marker", marker) - } - - if maxItems != 0 { - values.Add("maxItems", strconv.Itoa(maxItems)) - } - - out := &ListHostedZonesResponse{} - err := r.query("GET", fmt.Sprintf("/%s/hostedzone/", APIVersion), values, out) - if err != nil { - return nil, err - } - return out, err -} - -type GetChangeResponse struct { - ChangeInfo ChangeInfo `xml:"ChangeInfo"` -} - -func (r *Route53) GetChange(ID string) (string, error) { - ID = CleanChangeID(ID) - out := &GetChangeResponse{} - err := r.query("GET", fmt.Sprintf("/%s/change/%s", APIVersion, ID), nil, out) - if err != nil { - return "", err - } - return out.ChangeInfo.Status, err -} - -type ChangeResourceRecordSetsRequest struct { - Comment string `xml:"ChangeBatch>Comment,omitempty"` - Changes []Change `xml:"ChangeBatch>Changes>Change"` -} - -type Change struct { - Action string `xml:"Action"` - Record ResourceRecordSet `xml:"ResourceRecordSet"` -} - -type AliasTarget struct { - HostedZoneId string - DNSName string - EvaluateTargetHealth bool -} - -type ChangeResourceRecordSetsResponse struct { - ChangeInfo 
ChangeInfo `xml:"ChangeInfo"` -} - -func (r *Route53) ChangeResourceRecordSets(zone string, - req *ChangeResourceRecordSetsRequest) (*ChangeResourceRecordSetsResponse, error) { - // This is really sad, but we have to format this differently - // for Route53 to make them happy. - reqCopy := *req - for i, change := range reqCopy.Changes { - if len(change.Record.Records) > 1 { - var buf bytes.Buffer - for _, r := range change.Record.Records { - buf.WriteString(fmt.Sprintf( - "%s", - r)) - } - - change.Record.Records = nil - change.Record.RecordsXML = fmt.Sprintf( - "%s", buf.String()) - reqCopy.Changes[i] = change - } - } - - zone = CleanZoneID(zone) - out := &ChangeResourceRecordSetsResponse{} - if err := r.query("POST", fmt.Sprintf("/%s/hostedzone/%s/rrset", APIVersion, - zone), reqCopy, out); err != nil { - return nil, err - } - return out, nil -} - -type ListOpts struct { - Name string - Type string - Identifier string - MaxItems int -} - -type ListResourceRecordSetsResponse struct { - Records []ResourceRecordSet `xml:"ResourceRecordSets>ResourceRecordSet"` - IsTruncated bool `xml:"IsTruncated"` - MaxItems int `xml:"MaxItems"` - NextRecordName string `xml:"NextRecordName"` - NextRecordType string `xml:"NextRecordType"` - NextRecordIdentifier string `xml:"NextRecordIdentifier"` -} - -type ResourceRecordSet struct { - Name string `xml:"Name"` - Type string `xml:"Type"` - TTL int `xml:"TTL"` - Records []string `xml:"ResourceRecords>ResourceRecord>Value,omitempty"` - SetIdentifier string `xml:"SetIdentifier,omitempty"` - Weight int `xml:"Weight,omitempty"` - HealthCheckId string `xml:"HealthCheckId,omitempty"` - Region string `xml:"Region,omitempty"` - Failover string `xml:"Failover,omitempty"` - AliasTarget *AliasTarget `xml:"AliasTarget,omitempty"` - - RecordsXML string `xml:",innerxml"` -} - -func (r *Route53) ListResourceRecordSets(zone string, lopts *ListOpts) (*ListResourceRecordSetsResponse, error) { - if lopts == nil { - lopts = &ListOpts{} - } - params := make(map[string]string) - if lopts.Name != "" { - params["name"] = lopts.Name - } - if lopts.Type != "" { - params["type"] = lopts.Type - } - if lopts.Identifier != "" { - params["identifier"] = lopts.Identifier - } - if lopts.MaxItems != 0 { - params["maxitems"] = strconv.FormatInt(int64(lopts.MaxItems), 10) - } - - req := multimap(params) - zone = CleanZoneID(zone) - out := &ListResourceRecordSetsResponse{} - if err := r.query("GET", fmt.Sprintf("/%s/hostedzone/%s/rrset", APIVersion, zone), req, out); err != nil { - return nil, err - } - return out, nil -} - -func FQDN(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' { - return name - } else { - return name + "." 
- } -} diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/route53/sign.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/route53/sign.go deleted file mode 100644 index 0e05686..0000000 --- a/Godeps/_workspace/src/github.com/mitchellh/goamz/route53/sign.go +++ /dev/null @@ -1,29 +0,0 @@ -package route53 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "time" - - "github.com/mitchellh/goamz/aws" -) - -var b64 = base64.StdEncoding - -func sign(auth aws.Auth, path string, params map[string]string) { - date := time.Now().In(time.UTC).Format(time.RFC1123) - params["Date"] = date - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(date)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - header := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s", - auth.AccessKey, signature) - params["X-Amzn-Authorization"] = string(header) - if auth.Token != "" { - params["X-Amz-Security-Token"] = auth.Token - } -} diff --git a/Godeps/_workspace/src/github.com/square/go-jose/.travis.yml b/Godeps/_workspace/src/github.com/square/go-jose/.travis.yml deleted file mode 100644 index 6a24691..0000000 --- a/Godeps/_workspace/src/github.com/square/go-jose/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -sudo: false - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: -- 1.2 -- 1.3 -- 1.4 -- 1.5 -- 1.6 -- tip - -before_script: -- export PATH=$HOME/.local/bin:$PATH - -before_install: -- go get github.com/axw/gocov/gocov -- go get github.com/mattn/goveralls -- go get golang.org/x/tools/cmd/cover || true -- go get code.google.com/p/go.tools/cmd/cover || true -- pip install cram --user `whoami` - -script: -- go test . -v -covermode=count -coverprofile=profile.cov -- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov -- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t -- cd .. - -after_success: -- tail -n+2 cipher/profile.cov >> profile.cov -- $HOME/gopath/bin/goveralls -coverprofile=profile.cov -service=travis-ci - diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE deleted file mode 100644 index 968b453..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2013 Vaughan Newton - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md b/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md deleted file mode 100644 index d5cd4e7..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md +++ /dev/null @@ -1,70 +0,0 @@ -go-ini -====== - -INI parsing library for Go (golang). - -View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini). - -Usage ------ - -Parse an INI file: - -```go -import "github.com/vaughan0/go-ini" - -file, err := ini.LoadFile("myfile.ini") -``` - -Get data from the parsed file: - -```go -name, ok := file.Get("person", "name") -if !ok { - panic("'name' variable missing from 'person' section") -} -``` - -Iterate through values in a section: - -```go -for key, value := range file["mysection"] { - fmt.Printf("%s => %s\n", key, value) -} -``` - -Iterate through sections in a file: - -```go -for name, section := range file { - fmt.Printf("Section name: %s\n", name) -} -``` - -File Format ------------ - -INI files are parsed by go-ini line-by-line. Each line may be one of the following: - - * A section definition: [section-name] - * A property: key = value - * A comment: #blahblah _or_ ;blahblah - * Blank. The line will be ignored. - -Properties defined before any section headers are placed in the default section, which has -the empty string as it's key. - -Example: - -```ini -# I am a comment -; So am I! - -[apples] -colour = red or green -shape = applish - -[oranges] -shape = square -colour = blue -``` diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go deleted file mode 100644 index 81aeb32..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go +++ /dev/null @@ -1,123 +0,0 @@ -// Package ini provides functions for parsing INI configuration files. -package ini - -import ( - "bufio" - "fmt" - "io" - "os" - "regexp" - "strings" -) - -var ( - sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) - assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) -) - -// ErrSyntax is returned when there is a syntax error in an INI file. -type ErrSyntax struct { - Line int - Source string // The contents of the erroneous line, without leading or trailing whitespace -} - -func (e ErrSyntax) Error() string { - return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) -} - -// A File represents a parsed INI file. -type File map[string]Section - -// A Section represents a single section of an INI file. -type Section map[string]string - -// Returns a named Section. A Section will be created if one does not already exist for the given name. -func (f File) Section(name string) Section { - section := f[name] - if section == nil { - section = make(Section) - f[name] = section - } - return section -} - -// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. -func (f File) Get(section, key string) (value string, ok bool) { - if s := f[section]; s != nil { - value, ok = s[key] - } - return -} - -// Loads INI data from a reader and stores the data in the File. -func (f File) Load(in io.Reader) (err error) { - bufin, ok := in.(*bufio.Reader) - if !ok { - bufin = bufio.NewReader(in) - } - return parseFile(bufin, f) -} - -// Loads INI data from a named file and stores the data in the File. 
-func (f File) LoadFile(file string) (err error) { - in, err := os.Open(file) - if err != nil { - return - } - defer in.Close() - return f.Load(in) -} - -func parseFile(in *bufio.Reader, file File) (err error) { - section := "" - lineNum := 0 - for done := false; !done; { - var line string - if line, err = in.ReadString('\n'); err != nil { - if err == io.EOF { - done = true - } else { - return - } - } - lineNum++ - line = strings.TrimSpace(line) - if len(line) == 0 { - // Skip blank lines - continue - } - if line[0] == ';' || line[0] == '#' { - // Skip comments - continue - } - - if groups := assignRegex.FindStringSubmatch(line); groups != nil { - key, val := groups[1], groups[2] - key, val = strings.TrimSpace(key), strings.TrimSpace(val) - file.Section(section)[key] = val - } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { - name := strings.TrimSpace(groups[1]) - section = name - // Create the section if it does not exist - file.Section(section) - } else { - return ErrSyntax{lineNum, line} - } - - } - return nil -} - -// Loads and returns a File from a reader. -func Load(in io.Reader) (File, error) { - file := make(File) - err := file.Load(in) - return file, err -} - -// Loads and returns an INI File from a file on disk. -func LoadFile(filename string) (File, error) { - file := make(File) - err := file.LoadFile(filename) - return file, err -} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini b/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini deleted file mode 100644 index d13c999..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini +++ /dev/null @@ -1,2 +0,0 @@ -[default] -stuff = things diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/client.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/client.go index bc64114..5bda6bc 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/client.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/client.go @@ -504,6 +504,7 @@ func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, if len(cert) > 0 { cerRes.CertStableURL = resp.Header.Get("Content-Location") + cerRes.AccountRef = c.user.GetRegistration().URI issuedCert := pemEncode(derCertificateBytes(cert)) // If bundle is true, we want to return a certificate bundle. 
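The client.go hunk above now records the account registration URI on the stored certificate resource; the matching `AccountRef` field is added to `CertificateResource` in the messages.go hunk further down. Purely as an illustrative sketch, with hypothetical values and a trimmed stand-in struct rather than the real `acme.CertificateResource`, the persisted metadata would then carry the account reference alongside the certificate URLs:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// certMeta is a trimmed, illustrative stand-in for the JSON-visible
// fields of acme.CertificateResource after this change.
type certMeta struct {
	Domain        string `json:"domain"`
	CertURL       string `json:"certUrl"`
	CertStableURL string `json:"certStableUrl"`
	AccountRef    string `json:"accountRef,omitempty"`
}

func main() {
	// Hypothetical values, for illustration only.
	m := certMeta{
		Domain:        "example.com",
		CertURL:       "https://acme.example/acme/cert/abc123",
		CertStableURL: "https://acme.example/acme/cert/abc123",
		AccountRef:    "https://acme.example/acme/reg/42",
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}
```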
diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge.go index 2911fc1..eb843b7 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge.go @@ -21,7 +21,7 @@ var ( fqdnToZone = map[string]string{} ) -var recursiveNameserver = "google-public-dns-a.google.com:53" +var RecursiveNameserver = "google-public-dns-a.google.com:53" // DNS01Record returns a DNS record which will fulfill the `dns-01` challenge func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) { @@ -69,7 +69,15 @@ func (s *dnsChallenge) Solve(chlng challenge, domain string) error { logf("[INFO][%s] Checking DNS record propagation...", domain) - err = waitFor(90, 2, func() (bool, error) { + var timeout, interval time.Duration + switch provider := s.provider.(type) { + case ChallengeProviderTimeout: + timeout, interval = provider.Timeout() + default: + timeout, interval = 60*time.Second, 2*time.Second + } + + err = WaitFor(timeout, interval, func() (bool, error) { return preCheckDNS(fqdn, value) }) if err != nil { @@ -82,7 +90,7 @@ func (s *dnsChallenge) Solve(chlng challenge, domain string) error { // checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. func checkDNSPropagation(fqdn, value string) (bool, error) { // Initial attempt to resolve at the recursive NS - r, err := dnsQuery(fqdn, dns.TypeTXT, recursiveNameserver, true) + r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameserver, true) if err != nil { return false, err } @@ -160,12 +168,12 @@ func dnsQuery(fqdn string, rtype uint16, nameserver string, recursive bool) (in func lookupNameservers(fqdn string) ([]string, error) { var authoritativeNss []string - zone, err := findZoneByFqdn(fqdn, recursiveNameserver) + zone, err := FindZoneByFqdn(fqdn, RecursiveNameserver) if err != nil { return nil, err } - r, err := dnsQuery(zone, dns.TypeNS, recursiveNameserver, true) + r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameserver, true) if err != nil { return nil, err } @@ -182,14 +190,14 @@ func lookupNameservers(fqdn string) ([]string, error) { return nil, fmt.Errorf("Could not determine authoritative nameservers") } -// findZoneByFqdn determines the zone of the given fqdn -func findZoneByFqdn(fqdn, nameserver string) (string, error) { +// FindZoneByFqdn determines the zone of the given fqdn +func FindZoneByFqdn(fqdn, nameserver string) (string, error) { // Do we have it cached? if zone, ok := fqdnToZone[fqdn]; ok { return zone, nil } - // Query the authorative nameserver for a hopefully non-existing SOA record, + // Query the authoritative nameserver for a hopefully non-existing SOA record, // in the authority section of the reply it will have the SOA of the // containing zone. rfc2308 has this to say on the subject: // Name servers authoritative for a zone MUST include the SOA record of @@ -208,8 +216,8 @@ func findZoneByFqdn(fqdn, nameserver string) (string, error) { if soa, ok := ans.(*dns.SOA); ok { zone := soa.Hdr.Name // If we ended up on one of the TLDs, it means the domain did not exist. 
- publicsuffix, _ := publicsuffix.PublicSuffix(unFqdn(zone)) - if publicsuffix == unFqdn(zone) { + publicsuffix, _ := publicsuffix.PublicSuffix(UnFqdn(zone)) + if publicsuffix == UnFqdn(zone) { return "", fmt.Errorf("Could not determine zone authoritatively") } fqdnToZone[fqdn] = zone @@ -223,8 +231,8 @@ func findZoneByFqdn(fqdn, nameserver string) (string, error) { if soa, ok := ns.(*dns.SOA); ok { zone := soa.Hdr.Name // If we ended up on one of the TLDs, it means the domain did not exist. - publicsuffix, _ := publicsuffix.PublicSuffix(unFqdn(zone)) - if publicsuffix == unFqdn(zone) { + publicsuffix, _ := publicsuffix.PublicSuffix(UnFqdn(zone)) + if publicsuffix == UnFqdn(zone) { return "", fmt.Errorf("Could not determine zone authoritatively") } fqdnToZone[fqdn] = zone @@ -234,8 +242,13 @@ func findZoneByFqdn(fqdn, nameserver string) (string, error) { return "", fmt.Errorf("NS %s did not return the expected SOA record in the authority section", nameserver) } -// toFqdn converts the name into a fqdn appending a trailing dot. -func toFqdn(name string) string { +// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. +func ClearFqdnCache() { + fqdnToZone = map[string]string{} +} + +// ToFqdn converts the name into a fqdn appending a trailing dot. +func ToFqdn(name string) string { n := len(name) if n == 0 || name[n-1] == '.' { return name @@ -243,39 +256,11 @@ func toFqdn(name string) string { return name + "." } -// unFqdn converts the fqdn into a name removing the trailing dot. -func unFqdn(name string) string { +// UnFqdn converts the fqdn into a name removing the trailing dot. +func UnFqdn(name string) string { n := len(name) if n != 0 && name[n-1] == '.' { return name[:n-1] } return name } - -// clearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. -func clearFqdnCache() { - fqdnToZone = map[string]string{} -} - -// waitFor polls the given function 'f', once every 'interval' seconds, up to 'timeout' seconds. -func waitFor(timeout, interval int, f func() (bool, error)) error { - var lastErr string - timeup := time.After(time.Duration(timeout) * time.Second) - for { - select { - case <-timeup: - return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr) - default: - } - - stop, err := f() - if stop { - return nil - } - if err != nil { - lastErr = err.Error() - } - - time.Sleep(time.Duration(interval) * time.Second) - } -} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_dnsimple.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_dnsimple.go deleted file mode 100644 index 56b96f9..0000000 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_dnsimple.go +++ /dev/null @@ -1,139 +0,0 @@ -package acme - -import ( - "fmt" - "os" - "strings" - - "github.com/weppos/dnsimple-go/dnsimple" -) - -// DNSProviderDNSimple is an implementation of the DNSProvider interface. -type DNSProviderDNSimple struct { - client *dnsimple.Client -} - -// NewDNSProviderDNSimple returns a DNSProviderDNSimple instance with a configured dnsimple client. -// Authentication is either done using the passed credentials or - when empty - using the environment -// variables DNSIMPLE_EMAIL and DNSIMPLE_API_KEY. 
-func NewDNSProviderDNSimple(dnsimpleEmail, dnsimpleAPIKey string) (*DNSProviderDNSimple, error) { - if dnsimpleEmail == "" || dnsimpleAPIKey == "" { - dnsimpleEmail, dnsimpleAPIKey = dnsimpleEnvAuth() - if dnsimpleEmail == "" || dnsimpleAPIKey == "" { - return nil, fmt.Errorf("DNSimple credentials missing") - } - } - - c := &DNSProviderDNSimple{ - client: dnsimple.NewClient(dnsimpleAPIKey, dnsimpleEmail), - } - - return c, nil -} - -// Present creates a TXT record to fulfil the dns-01 challenge. -func (c *DNSProviderDNSimple) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - - zoneID, zoneName, err := c.getHostedZone(domain) - if err != nil { - return err - } - - recordAttributes := c.newTxtRecord(zoneName, fqdn, value, ttl) - _, _, err = c.client.Domains.CreateRecord(zoneID, *recordAttributes) - if err != nil { - return fmt.Errorf("DNSimple API call failed: %v", err) - } - - return nil -} - -// CleanUp removes the TXT record matching the specified parameters. -func (c *DNSProviderDNSimple) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := DNS01Record(domain, keyAuth) - - records, err := c.findTxtRecords(domain, fqdn) - if err != nil { - return err - } - - for _, rec := range records { - _, err := c.client.Domains.DeleteRecord(rec.DomainId, rec.Id) - if err != nil { - return err - } - } - return nil -} - -func (c *DNSProviderDNSimple) getHostedZone(domain string) (string, string, error) { - domains, _, err := c.client.Domains.List() - if err != nil { - return "", "", fmt.Errorf("DNSimple API call failed: %v", err) - } - - var hostedDomain dnsimple.Domain - for _, d := range domains { - if strings.HasSuffix(domain, d.Name) { - if len(d.Name) > len(hostedDomain.Name) { - hostedDomain = d - } - } - } - if hostedDomain.Id == 0 { - return "", "", fmt.Errorf("No matching DNSimple domain found for domain %s", domain) - } - - return fmt.Sprintf("%v", hostedDomain.Id), hostedDomain.Name, nil -} - -func (c *DNSProviderDNSimple) findTxtRecords(domain, fqdn string) ([]dnsimple.Record, error) { - zoneID, zoneName, err := c.getHostedZone(domain) - if err != nil { - return nil, err - } - - var records []dnsimple.Record - result, _, err := c.client.Domains.ListRecords(zoneID, "", "TXT") - if err != nil { - return records, fmt.Errorf("DNSimple API call has failed: %v", err) - } - - recordName := c.extractRecordName(fqdn, zoneName) - for _, record := range result { - if record.Name == recordName { - records = append(records, record) - } - } - - return records, nil -} - -func (c *DNSProviderDNSimple) newTxtRecord(zone, fqdn, value string, ttl int) *dnsimple.Record { - name := c.extractRecordName(fqdn, zone) - - return &dnsimple.Record{ - Type: "TXT", - Name: name, - Content: value, - TTL: ttl, - } -} - -func (c *DNSProviderDNSimple) extractRecordName(fqdn, domain string) string { - name := unFqdn(fqdn) - if idx := strings.Index(name, "."+domain); idx != -1 { - return name[:idx] - } - return name -} - -func dnsimpleEnvAuth() (email, apiKey string) { - email = os.Getenv("DNSIMPLE_EMAIL") - apiKey = os.Getenv("DNSIMPLE_API_KEY") - if len(email) == 0 || len(apiKey) == 0 { - return "", "" - } - return -} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_manual.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_manual.go index 4f1e309..ccb79ad 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_manual.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_manual.go 
@@ -22,9 +22,16 @@ func NewDNSProviderManual() (*DNSProviderManual, error) { func (*DNSProviderManual) Present(domain, token, keyAuth string) error { fqdn, value, ttl := DNS01Record(domain, keyAuth) dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value) - logf("[INFO] acme: Please create the following TXT record in your DNS zone:") + + authZone, err := FindZoneByFqdn(fqdn, RecursiveNameserver) + if err != nil { + return err + } + + logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone) logf("[INFO] acme: %s", dnsRecord) logf("[INFO] acme: Press 'Enter' when you are done") + reader := bufio.NewReader(os.Stdin) _, _ = reader.ReadString('\n') return nil @@ -34,7 +41,13 @@ func (*DNSProviderManual) Present(domain, token, keyAuth string) error { func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { fqdn, _, ttl := DNS01Record(domain, keyAuth) dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...") - logf("[INFO] acme: You can now remove this TXT record from your DNS zone:") + + authZone, err := FindZoneByFqdn(fqdn, RecursiveNameserver) + if err != nil { + return err + } + + logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone) logf("[INFO] acme: %s", dnsRecord) return nil } diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_rfc2136.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_rfc2136.go deleted file mode 100644 index 7c6900d..0000000 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_rfc2136.go +++ /dev/null @@ -1,106 +0,0 @@ -package acme - -import ( - "fmt" - "net" - "strings" - "time" - - "github.com/miekg/dns" -) - -// DNSProviderRFC2136 is an implementation of the ChallengeProvider interface that -// uses dynamic DNS updates (RFC 2136) to create TXT records on a nameserver. -type DNSProviderRFC2136 struct { - nameserver string - tsigAlgorithm string - tsigKey string - tsigSecret string -} - -// NewDNSProviderRFC2136 returns a new DNSProviderRFC2136 instance. -// To disable TSIG authentication 'tsigAlgorithm, 'tsigKey' and 'tsigSecret' must be set to the empty string. -// 'nameserver' must be a network address in the the form "host" or "host:port". -func NewDNSProviderRFC2136(nameserver, tsigAlgorithm, tsigKey, tsigSecret string) (*DNSProviderRFC2136, error) { - // Append the default DNS port if none is specified. 
- if _, _, err := net.SplitHostPort(nameserver); err != nil { - if strings.Contains(err.Error(), "missing port") { - nameserver = net.JoinHostPort(nameserver, "53") - } else { - return nil, err - } - } - d := &DNSProviderRFC2136{ - nameserver: nameserver, - } - if tsigAlgorithm == "" { - tsigAlgorithm = dns.HmacMD5 - } - d.tsigAlgorithm = tsigAlgorithm - if len(tsigKey) > 0 && len(tsigSecret) > 0 { - d.tsigKey = tsigKey - d.tsigSecret = tsigSecret - } - - return d, nil -} - -// Present creates a TXT record using the specified parameters -func (r *DNSProviderRFC2136) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - return r.changeRecord("INSERT", fqdn, value, ttl) -} - -// CleanUp removes the TXT record matching the specified parameters -func (r *DNSProviderRFC2136) CleanUp(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - return r.changeRecord("REMOVE", fqdn, value, ttl) -} - -func (r *DNSProviderRFC2136) changeRecord(action, fqdn, value string, ttl int) error { - // Find the zone for the given fqdn - zone, err := findZoneByFqdn(fqdn, r.nameserver) - if err != nil { - return err - } - - // Create RR - rr := new(dns.TXT) - rr.Hdr = dns.RR_Header{Name: fqdn, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: uint32(ttl)} - rr.Txt = []string{value} - rrs := []dns.RR{rr} - - // Create dynamic update packet - m := new(dns.Msg) - m.SetUpdate(zone) - switch action { - case "INSERT": - // Always remove old challenge left over from who knows what. - m.RemoveRRset(rrs) - m.Insert(rrs) - case "REMOVE": - m.Remove(rrs) - default: - return fmt.Errorf("Unexpected action: %s", action) - } - - // Setup client - c := new(dns.Client) - c.SingleInflight = true - // TSIG authentication / msg signing - if len(r.tsigKey) > 0 && len(r.tsigSecret) > 0 { - m.SetTsig(dns.Fqdn(r.tsigKey), r.tsigAlgorithm, 300, time.Now().Unix()) - c.TsigSecret = map[string]string{dns.Fqdn(r.tsigKey): r.tsigSecret} - } - - // Send the query - reply, _, err := c.Exchange(m, r.nameserver) - if err != nil { - return fmt.Errorf("DNS update failed: %v", err) - } - if reply != nil && reply.Rcode != dns.RcodeSuccess { - return fmt.Errorf("DNS update failed. Server replied: %s", dns.RcodeToString[reply.Rcode]) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_route53.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_route53.go deleted file mode 100644 index 43e42da..0000000 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_route53.go +++ /dev/null @@ -1,137 +0,0 @@ -package acme - -import ( - "fmt" - "strings" - "time" - - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/route53" -) - -// DNSProviderRoute53 is an implementation of the DNSProvider interface -type DNSProviderRoute53 struct { - client *route53.Route53 -} - -// NewDNSProviderRoute53 returns a DNSProviderRoute53 instance with a configured route53 client. -// Authentication is either done using the passed credentials or - when empty - falling back to -// the customary AWS credential mechanisms, including the file referenced by $AWS_CREDENTIAL_FILE -// (defaulting to $HOME/.aws/credentials) optionally scoped to $AWS_PROFILE, credentials -// supplied by the environment variables AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY [ + AWS_SECURITY_TOKEN ], -// and finally credentials available via the EC2 instance metadata service. 
-func NewDNSProviderRoute53(awsAccessKey, awsSecretKey, awsRegionName string) (*DNSProviderRoute53, error) { - region, ok := aws.Regions[awsRegionName] - if !ok { - return nil, fmt.Errorf("Invalid AWS region name %s", awsRegionName) - } - - // use aws.GetAuth, which tries really hard to find credentails: - // - uses awsAccessKey and awsSecretKey, if provided - // - uses AWS_PROFILE / AWS_CREDENTIAL_FILE, if provided - // - uses AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY and optionally AWS_SECURITY_TOKEN, if provided - // - uses EC2 instance metadata credentials (http://169.254.169.254/latest/meta-data/…), if available - // ...and otherwise returns an error - auth, err := aws.GetAuth(awsAccessKey, awsSecretKey) - if err != nil { - return nil, err - } - - client := route53.New(auth, region) - return &DNSProviderRoute53{client: client}, nil -} - -// Present creates a TXT record using the specified parameters -func (r *DNSProviderRoute53) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - value = `"` + value + `"` - return r.changeRecord("UPSERT", fqdn, value, ttl) -} - -// CleanUp removes the TXT record matching the specified parameters -func (r *DNSProviderRoute53) CleanUp(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - value = `"` + value + `"` - return r.changeRecord("DELETE", fqdn, value, ttl) -} - -func (r *DNSProviderRoute53) changeRecord(action, fqdn, value string, ttl int) error { - hostedZoneID, err := r.getHostedZoneID(fqdn) - if err != nil { - return err - } - recordSet := newTXTRecordSet(fqdn, value, ttl) - update := route53.Change{Action: action, Record: recordSet} - changes := []route53.Change{update} - req := route53.ChangeResourceRecordSetsRequest{Comment: "Created by Lego", Changes: changes} - resp, err := r.client.ChangeResourceRecordSets(hostedZoneID, &req) - if err != nil { - return err - } - - return waitFor(90, 5, func() (bool, error) { - status, err := r.client.GetChange(resp.ChangeInfo.ID) - if err != nil { - return false, err - } - if status == "INSYNC" { - return true, nil - } - return false, nil - }) -} - -func (r *DNSProviderRoute53) getHostedZoneID(fqdn string) (string, error) { - zones := []route53.HostedZone{} - zoneResp, err := r.client.ListHostedZones("", 0) - if err != nil { - return "", err - } - zones = append(zones, zoneResp.HostedZones...) - - for zoneResp.IsTruncated { - resp, err := r.client.ListHostedZones(zoneResp.Marker, 0) - if err != nil { - if rateExceeded(err) { - time.Sleep(time.Second) - continue - } - return "", err - } - zoneResp = resp - zones = append(zones, zoneResp.HostedZones...) 
- } - - var hostedZone route53.HostedZone - for _, zone := range zones { - if strings.HasSuffix(fqdn, zone.Name) { - if len(zone.Name) > len(hostedZone.Name) { - hostedZone = zone - } - } - } - if hostedZone.ID == "" { - return "", fmt.Errorf("No Route53 hosted zone found for domain %s", fqdn) - } - - return hostedZone.ID, nil -} - -func newTXTRecordSet(fqdn, value string, ttl int) route53.ResourceRecordSet { - return route53.ResourceRecordSet{ - Name: fqdn, - Type: "TXT", - Records: []string{value}, - TTL: ttl, - } - -} - -// Route53 API has pretty strict rate limits (5req/s globally per account) -// Hence we check if we are being throttled to maybe retry the request -func rateExceeded(err error) bool { - if strings.Contains(err.Error(), "Throttling") { - return true - } - return false -} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/jws.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/jws.go index 78d8272..8435d0c 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/jws.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/jws.go @@ -9,7 +9,7 @@ import ( "fmt" "net/http" - "github.com/square/go-jose" + "gopkg.in/square/go-jose.v1" ) type jws struct { diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/messages.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/messages.go index d238df8..d1fac92 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/messages.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/messages.go @@ -3,7 +3,7 @@ package acme import ( "time" - "github.com/square/go-jose" + "gopkg.in/square/go-jose.v1" ) type directory struct { @@ -109,6 +109,7 @@ type CertificateResource struct { Domain string `json:"domain"` CertURL string `json:"certUrl"` CertStableURL string `json:"certStableUrl"` + AccountRef string `json:"accountRef,omitempty"` PrivateKey []byte `json:"-"` Certificate []byte `json:"-"` } diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/provider.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/provider.go index b8998d2..d177ff0 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/provider.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/provider.go @@ -1,8 +1,28 @@ package acme -// ChallengeProvider presents the solution to a challenge available to be solved -// CleanUp will be called by the challenge if Present ends in a non-error state. +import "time" + +// ChallengeProvider enables implementing a custom challenge +// provider. Present presents the solution to a challenge available to +// be solved. CleanUp will be called by the challenge if Present ends +// in a non-error state. type ChallengeProvider interface { Present(domain, token, keyAuth string) error CleanUp(domain, token, keyAuth string) error } + +// ChallengeProviderTimeout allows for implementing a +// ChallengeProvider where an unusually long timeout is required when +// waiting for an ACME challenge to be satisfied, such as when +// checking for DNS record progagation. If an implementor of a +// ChallengeProvider provides a Timeout method, then the return values +// of the Timeout method will be used when appropriate by the acme +// package. The interval value is the time between checks. +// +// The default values used for timeout and interval are 60 seconds and +// 2 seconds respectively. These are used when no Timeout method is +// defined for the ChallengeProvider. 
+type ChallengeProviderTimeout interface { + ChallengeProvider + Timeout() (timeout, interval time.Duration) +} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/utils.go b/Godeps/_workspace/src/github.com/xenolf/lego/acme/utils.go new file mode 100644 index 0000000..2fa0db3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/xenolf/lego/acme/utils.go @@ -0,0 +1,29 @@ +package acme + +import ( + "fmt" + "time" +) + +// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. +func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { + var lastErr string + timeup := time.After(timeout) + for { + select { + case <-timeup: + return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr) + default: + } + + stop, err := f() + if stop { + return nil + } + if err != nil { + lastErr = err.Error() + } + + time.Sleep(interval) + } +} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_cloudflare.go b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go similarity index 57% rename from Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_cloudflare.go rename to Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go index b5bf6d1..20af873 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_cloudflare.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go @@ -1,4 +1,6 @@ -package acme +// Package cloudflare implements a DNS provider for solving the DNS-01 +// challenge using cloudflare DNS. +package cloudflare import ( "bytes" @@ -7,39 +9,52 @@ import ( "io" "net/http" "os" - "strings" "time" + + "github.com/xenolf/lego/acme" ) // CloudFlareAPIURL represents the API endpoint to call. // TODO: Unexport? const CloudFlareAPIURL = "https://api.cloudflare.com/client/v4" -// DNSProviderCloudFlare is an implementation of the DNSProvider interface -type DNSProviderCloudFlare struct { +// DNSProvider is an implementation of the acme.ChallengeProvider interface +type DNSProvider struct { authEmail string authKey string } -// NewDNSProviderCloudFlare returns a DNSProviderCloudFlare instance with a configured cloudflare client. -// Credentials can either be passed as arguments or through CLOUDFLARE_EMAIL and CLOUDFLARE_API_KEY env vars. -func NewDNSProviderCloudFlare(cloudflareEmail, cloudflareKey string) (*DNSProviderCloudFlare, error) { - if cloudflareEmail == "" || cloudflareKey == "" { - cloudflareEmail, cloudflareKey = cloudflareEnvAuth() - if cloudflareEmail == "" || cloudflareKey == "" { - return nil, fmt.Errorf("CloudFlare credentials missing") - } +// NewDNSProvider returns a DNSProvider instance configured for cloudflare. +// Credentials must be passed in the environment variables: CLOUDFLARE_EMAIL +// and CLOUDFLARE_API_KEY. +func NewDNSProvider() (*DNSProvider, error) { + email := os.Getenv("CLOUDFLARE_EMAIL") + key := os.Getenv("CLOUDFLARE_API_KEY") + return NewDNSProviderCredentials(email, key) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for cloudflare. 
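The `ChallengeProviderTimeout` interface and the exported `WaitFor` helper introduced above are what let a DNS provider stretch the propagation check beyond the 60s/2s defaults. As a rough, hypothetical sketch (not part of this change), a custom provider outside the acme package could opt in like this:

```go
package customdns

import (
	"time"

	"github.com/xenolf/lego/acme"
)

// Provider is a hypothetical acme.ChallengeProviderTimeout implementation;
// the Present/CleanUp bodies are placeholders.
type Provider struct{}

func (p *Provider) Present(domain, token, keyAuth string) error { return nil }
func (p *Provider) CleanUp(domain, token, keyAuth string) error { return nil }

// Timeout overrides the 60s/2s defaults the acme package falls back to
// when a provider has no Timeout method.
func (p *Provider) Timeout() (timeout, interval time.Duration) {
	return 5 * time.Minute, 10 * time.Second
}

// Compile-time check that Provider satisfies the new interface.
var _ acme.ChallengeProviderTimeout = (*Provider)(nil)
```

The dnsChallenge solver in dns_challenge.go then picks these values up via the type switch shown earlier and passes them to acme.WaitFor.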
+func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) { + if email == "" || key == "" { + return nil, fmt.Errorf("CloudFlare credentials missing") } - return &DNSProviderCloudFlare{ - authEmail: cloudflareEmail, - authKey: cloudflareKey, + return &DNSProvider{ + authEmail: email, + authKey: key, }, nil } +// Timeout returns the timeout and interval to use when checking for DNS +// propagation. Adjusting here to cope with spikes in propagation times. +func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { + return 120 * time.Second, 2 * time.Second +} + // Present creates a TXT record to fulfil the dns-01 challenge -func (c *DNSProviderCloudFlare) Present(domain, token, keyAuth string) error { - fqdn, value, _ := DNS01Record(domain, keyAuth) +func (c *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, _ := acme.DNS01Record(domain, keyAuth) zoneID, err := c.getHostedZoneID(fqdn) if err != nil { return err @@ -47,7 +62,7 @@ func (c *DNSProviderCloudFlare) Present(domain, token, keyAuth string) error { rec := cloudFlareRecord{ Type: "TXT", - Name: unFqdn(fqdn), + Name: acme.UnFqdn(fqdn), Content: value, TTL: 120, } @@ -66,8 +81,8 @@ func (c *DNSProviderCloudFlare) Present(domain, token, keyAuth string) error { } // CleanUp removes the TXT record matching the specified parameters -func (c *DNSProviderCloudFlare) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := DNS01Record(domain, keyAuth) +func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) record, err := c.findTxtRecord(fqdn) if err != nil { @@ -82,41 +97,37 @@ func (c *DNSProviderCloudFlare) CleanUp(domain, token, keyAuth string) error { return nil } -func (c *DNSProviderCloudFlare) getHostedZoneID(fqdn string) (string, error) { +func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) { // HostedZone represents a CloudFlare DNS zone type HostedZone struct { ID string `json:"id"` Name string `json:"name"` } - result, err := c.makeRequest("GET", "/zones?per_page=1000", nil) + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameserver) if err != nil { return "", err } - var zones []HostedZone - err = json.Unmarshal(result, &zones) + result, err := c.makeRequest("GET", "/zones?name=" + acme.UnFqdn(authZone), nil) if err != nil { return "", err } - var hostedZone HostedZone - for _, zone := range zones { - name := toFqdn(zone.Name) - if strings.HasSuffix(fqdn, name) { - if len(zone.Name) > len(hostedZone.Name) { - hostedZone = zone - } - } + var hostedZone []HostedZone + err = json.Unmarshal(result, &hostedZone) + if err != nil { + return "", err } - if hostedZone.ID == "" { - return "", fmt.Errorf("No matching CloudFlare zone found for %s", fqdn) + + if len(hostedZone) != 1 { + return "", fmt.Errorf("Zone %s not found in CloudFlare for domain %s", authZone, fqdn) } - return hostedZone.ID, nil + return hostedZone[0].ID, nil } -func (c *DNSProviderCloudFlare) findTxtRecord(fqdn string) (*cloudFlareRecord, error) { +func (c *DNSProvider) findTxtRecord(fqdn string) (*cloudFlareRecord, error) { zoneID, err := c.getHostedZoneID(fqdn) if err != nil { return nil, err @@ -134,7 +145,7 @@ func (c *DNSProviderCloudFlare) findTxtRecord(fqdn string) (*cloudFlareRecord, e } for _, rec := range records { - if rec.Name == unFqdn(fqdn) && rec.Type == "TXT" { + if rec.Name == acme.UnFqdn(fqdn) && rec.Type == "TXT" { return &rec, nil } } @@ -142,7 +153,7 @@ func (c *DNSProviderCloudFlare) findTxtRecord(fqdn 
string) (*cloudFlareRecord, e return nil, fmt.Errorf("No existing record found for %s", fqdn) } -func (c *DNSProviderCloudFlare) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) { +func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) { // APIError contains error details for failed requests type APIError struct { Code int `json:"code,omitempty"` @@ -163,7 +174,7 @@ func (c *DNSProviderCloudFlare) makeRequest(method, uri string, body io.Reader) req.Header.Set("X-Auth-Email", c.authEmail) req.Header.Set("X-Auth-Key", c.authKey) - req.Header.Set("User-Agent", userAgent()) + //req.Header.Set("User-Agent", userAgent()) client := http.Client{Timeout: 30 * time.Second} resp, err := client.Do(req) @@ -189,15 +200,6 @@ func (c *DNSProviderCloudFlare) makeRequest(method, uri string, body io.Reader) return r.Result, nil } -func cloudflareEnvAuth() (email, apiKey string) { - email = os.Getenv("CLOUDFLARE_EMAIL") - apiKey = os.Getenv("CLOUDFLARE_API_KEY") - if len(email) == 0 || len(apiKey) == 0 { - return "", "" - } - return -} - // cloudFlareRecord represents a CloudFlare DNS record type cloudFlareRecord struct { Name string `json:"name"` diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_digitalocean.go b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go similarity index 62% rename from Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_digitalocean.go rename to Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go index 176439e..9f744f3 100644 --- a/Godeps/_workspace/src/github.com/xenolf/lego/acme/dns_challenge_digitalocean.go +++ b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go @@ -1,33 +1,49 @@ -package acme +// Package digitalocean implements a DNS provider for solving the DNS-01 +// challenge using digitalocean DNS. +package digitalocean import ( "bytes" "encoding/json" "fmt" "net/http" + "os" "sync" + "time" + + "github.com/xenolf/lego/acme" ) -// DNSProviderDigitalOcean is an implementation of the DNSProvider interface +// DNSProvider is an implementation of the acme.ChallengeProvider interface // that uses DigitalOcean's REST API to manage TXT records for a domain. -type DNSProviderDigitalOcean struct { +type DNSProvider struct { apiAuthToken string recordIDs map[string]int recordIDsMu sync.Mutex } -// NewDNSProviderDigitalOcean returns a new DNSProviderDigitalOcean instance. -// apiAuthToken is the personal access token created in the DigitalOcean account -// control panel, and it will be sent in bearer authorization headers. -func NewDNSProviderDigitalOcean(apiAuthToken string) (*DNSProviderDigitalOcean, error) { - return &DNSProviderDigitalOcean{ +// NewDNSProvider returns a DNSProvider instance configured for Digital +// Ocean. Credentials must be passed in the environment variable: +// DO_AUTH_TOKEN. +func NewDNSProvider() (*DNSProvider, error) { + apiAuthToken := os.Getenv("DO_AUTH_TOKEN") + return NewDNSProviderCredentials(apiAuthToken) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for Digital Ocean. 
+func NewDNSProviderCredentials(apiAuthToken string) (*DNSProvider, error) { + if apiAuthToken == "" { + return nil, fmt.Errorf("DigitalOcean credentials missing") + } + return &DNSProvider{ apiAuthToken: apiAuthToken, recordIDs: make(map[string]int), }, nil } // Present creates a TXT record using the specified parameters -func (d *DNSProviderDigitalOcean) Present(domain, token, keyAuth string) error { +func (d *DNSProvider) Present(domain, token, keyAuth string) error { // txtRecordRequest represents the request body to DO's API to make a TXT record type txtRecordRequest struct { RecordType string `json:"type"` @@ -45,9 +61,16 @@ func (d *DNSProviderDigitalOcean) Present(domain, token, keyAuth string) error { } `json:"domain_record"` } - fqdn, value, _ := DNS01Record(domain, keyAuth) + fqdn, value, _ := acme.DNS01Record(domain, keyAuth) + + authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameserver) + if err != nil { + return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err) + } + + authZone = acme.UnFqdn(authZone) - reqURL := fmt.Sprintf("%s/v2/domains/%s/records", digitalOceanBaseURL, domain) + reqURL := fmt.Sprintf("%s/v2/domains/%s/records", digitalOceanBaseURL, authZone) reqData := txtRecordRequest{RecordType: "TXT", Name: fqdn, Data: value} body, err := json.Marshal(reqData) if err != nil { @@ -61,7 +84,8 @@ func (d *DNSProviderDigitalOcean) Present(domain, token, keyAuth string) error { req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken)) - resp, err := http.DefaultClient.Do(req) + client := http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) if err != nil { return err } @@ -87,8 +111,8 @@ func (d *DNSProviderDigitalOcean) Present(domain, token, keyAuth string) error { } // CleanUp removes the TXT record matching the specified parameters -func (d *DNSProviderDigitalOcean) CleanUp(domain, token, keyAuth string) error { - fqdn, _, _ := DNS01Record(domain, keyAuth) +func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) // get the record's unique ID from when we created it d.recordIDsMu.Lock() @@ -98,7 +122,14 @@ func (d *DNSProviderDigitalOcean) CleanUp(domain, token, keyAuth string) error { return fmt.Errorf("unknown record ID for '%s'", fqdn) } - reqURL := fmt.Sprintf("%s/v2/domains/%s/records/%d", digitalOceanBaseURL, domain, recordID) + authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameserver) + if err != nil { + return fmt.Errorf("Could not determine zone for domain: '%s'. 
%s", domain, err) + } + + authZone = acme.UnFqdn(authZone) + + reqURL := fmt.Sprintf("%s/v2/domains/%s/records/%d", digitalOceanBaseURL, authZone, recordID) req, err := http.NewRequest("DELETE", reqURL, nil) if err != nil { return err @@ -106,7 +137,8 @@ func (d *DNSProviderDigitalOcean) CleanUp(domain, token, keyAuth string) error { req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken)) - resp, err := http.DefaultClient.Do(req) + client := http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) if err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go new file mode 100644 index 0000000..c58c91e --- /dev/null +++ b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go @@ -0,0 +1,142 @@ +// Package dnsimple implements a DNS provider for solving the DNS-01 challenge +// using dnsimple DNS. +package dnsimple + +import ( + "fmt" + "os" + "strings" + + "github.com/weppos/dnsimple-go/dnsimple" + "github.com/xenolf/lego/acme" +) + +// DNSProvider is an implementation of the acme.ChallengeProvider interface. +type DNSProvider struct { + client *dnsimple.Client +} + +// NewDNSProvider returns a DNSProvider instance configured for dnsimple. +// Credentials must be passed in the environment variables: DNSIMPLE_EMAIL +// and DNSIMPLE_API_KEY. +func NewDNSProvider() (*DNSProvider, error) { + email := os.Getenv("DNSIMPLE_EMAIL") + key := os.Getenv("DNSIMPLE_API_KEY") + return NewDNSProviderCredentials(email, key) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for dnsimple. +func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) { + if email == "" || key == "" { + return nil, fmt.Errorf("DNSimple credentials missing") + } + + return &DNSProvider{ + client: dnsimple.NewClient(key, email), + }, nil +} + +// Present creates a TXT record to fulfil the dns-01 challenge. +func (c *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + + zoneID, zoneName, err := c.getHostedZone(domain) + if err != nil { + return err + } + + recordAttributes := c.newTxtRecord(zoneName, fqdn, value, ttl) + _, _, err = c.client.Domains.CreateRecord(zoneID, *recordAttributes) + if err != nil { + return fmt.Errorf("DNSimple API call failed: %v", err) + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters. 
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + records, err := c.findTxtRecords(domain, fqdn) + if err != nil { + return err + } + + for _, rec := range records { + _, err := c.client.Domains.DeleteRecord(rec.DomainId, rec.Id) + if err != nil { + return err + } + } + return nil +} + +func (c *DNSProvider) getHostedZone(domain string) (string, string, error) { + zones, _, err := c.client.Domains.List() + if err != nil { + return "", "", fmt.Errorf("DNSimple API call failed: %v", err) + } + + + authZone, err := acme.FindZoneByFqdn(domain, acme.RecursiveNameserver) + if err != nil { + return "", "", err + } + + var hostedZone dnsimple.Domain + for _, zone := range zones { + if zone.Name == acme.UnFqdn(authZone) { + hostedZone = zone + } + } + + if hostedZone.Id == 0 { + return "", "", fmt.Errorf("Zone %s not found in DNSimple for domain %s", authZone, domain) + + } + + return fmt.Sprintf("%v", hostedZone.Id), hostedZone.Name, nil +} + +func (c *DNSProvider) findTxtRecords(domain, fqdn string) ([]dnsimple.Record, error) { + zoneID, zoneName, err := c.getHostedZone(domain) + if err != nil { + return nil, err + } + + var records []dnsimple.Record + result, _, err := c.client.Domains.ListRecords(zoneID, "", "TXT") + if err != nil { + return records, fmt.Errorf("DNSimple API call has failed: %v", err) + } + + recordName := c.extractRecordName(fqdn, zoneName) + for _, record := range result { + if record.Name == recordName { + records = append(records, record) + } + } + + return records, nil +} + +func (c *DNSProvider) newTxtRecord(zone, fqdn, value string, ttl int) *dnsimple.Record { + name := c.extractRecordName(fqdn, zone) + + return &dnsimple.Record{ + Type: "TXT", + Name: name, + Content: value, + TTL: ttl, + } +} + +func (c *DNSProvider) extractRecordName(fqdn, domain string) string { + name := acme.UnFqdn(fqdn) + if idx := strings.Index(name, "."+domain); idx != -1 { + return name[:idx] + } + return name +} diff --git a/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/route53/route53.go b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/route53/route53.go new file mode 100644 index 0000000..47b5617 --- /dev/null +++ b/Godeps/_workspace/src/github.com/xenolf/lego/providers/dns/route53/route53.go @@ -0,0 +1,164 @@ +// Package route53 implements a DNS provider for solving the DNS-01 challenge +// using AWS Route 53 DNS. +package route53 + +import ( + "fmt" + "math/rand" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/xenolf/lego/acme" +) + +const ( + maxRetries = 5 +) + +// DNSProvider implements the acme.ChallengeProvider interface +type DNSProvider struct { + client *route53.Route53 +} + +// customRetryer implements the client.Retryer interface by composing the +// DefaultRetryer. It controls the logic for retrying recoverable request +// errors (e.g. when rate limits are exceeded). +type customRetryer struct { + client.DefaultRetryer +} + +// RetryRules overwrites the DefaultRetryer's method. +// It uses a basic exponential backoff algorithm that returns an initial +// delay of ~400ms with an upper limit of ~30 seconds which should prevent +// causing a high number of consecutive throttling errors. +// For reference: Route 53 enforces an account-wide(!) 5req/s query limit. 
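To make the bounds in that comment concrete, here is a small standalone sketch (not part of the vendored code) that evaluates the delay formula used by `RetryRules` below, `2^retryCount * (200..249) ms` with the retry count capped at 7:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The first retry (retryCount=1) lands around 400-500ms; the cap at
	// retryCount=7 keeps the delay under roughly 32 seconds.
	for _, rc := range []int{1, 3, 7} {
		low := time.Duration(1<<uint(rc)) * 200 * time.Millisecond
		high := time.Duration(1<<uint(rc)) * 249 * time.Millisecond
		fmt.Printf("retryCount=%d: %v .. %v\n", rc, low, high)
	}
}
```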
+func (d customRetryer) RetryRules(r *request.Request) time.Duration { + retryCount := r.RetryCount + if retryCount > 7 { + retryCount = 7 + } + + delay := (1 << uint(retryCount)) * (rand.Intn(50) + 200) + return time.Duration(delay) * time.Millisecond +} + +// NewDNSProvider returns a DNSProvider instance configured for the AWS +// Route 53 service. +// +// AWS Credentials are automatically detected in the following locations +// and prioritized in the following order: +// 1. Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, +// AWS_REGION, [AWS_SESSION_TOKEN] +// 2. Shared credentials file (defaults to ~/.aws/credentials) +// 3. Amazon EC2 IAM role +// +// See also: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk +func NewDNSProvider() (*DNSProvider, error) { + r := customRetryer{} + r.NumMaxRetries = maxRetries + config := request.WithRetryer(aws.NewConfig(), r) + client := route53.New(session.New(config)) + + return &DNSProvider{client: client}, nil +} + +// Present creates a TXT record using the specified parameters +func (r *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + value = `"` + value + `"` + return r.changeRecord("UPSERT", fqdn, value, ttl) +} + +// CleanUp removes the TXT record matching the specified parameters +func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + value = `"` + value + `"` + return r.changeRecord("DELETE", fqdn, value, ttl) +} + +func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { + hostedZoneID, err := r.getHostedZoneID(fqdn) + if err != nil { + return err + } + + recordSet := newTXTRecordSet(fqdn, value, ttl) + reqParams := &route53.ChangeResourceRecordSetsInput{ + HostedZoneId: aws.String(hostedZoneID), + ChangeBatch: &route53.ChangeBatch{ + Comment: aws.String("Managed by Lego"), + Changes: []*route53.Change{ + { + Action: aws.String(action), + ResourceRecordSet: recordSet, + }, + }, + }, + } + + resp, err := r.client.ChangeResourceRecordSets(reqParams) + if err != nil { + return err + } + + statusId := resp.ChangeInfo.Id + + return acme.WaitFor(120*time.Second, 4*time.Second, func() (bool, error) { + reqParams := &route53.GetChangeInput{ + Id: statusId, + } + resp, err := r.client.GetChange(reqParams) + if err != nil { + return false, err + } + if *resp.ChangeInfo.Status == route53.ChangeStatusInsync { + return true, nil + } + return false, nil + }) +} + +func (r *DNSProvider) getHostedZoneID(fqdn string) (string, error) { + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameserver) + if err != nil { + return "", err + } + + // .DNSName should not have a trailing dot + reqParams := &route53.ListHostedZonesByNameInput{ + DNSName: aws.String(acme.UnFqdn(authZone)), + MaxItems: aws.String("1"), + } + resp, err := r.client.ListHostedZonesByName(reqParams) + if err != nil { + return "", err + } + + // .Name has a trailing dot + if len(resp.HostedZones) == 0 || *resp.HostedZones[0].Name != authZone { + return "", fmt.Errorf("Zone %s not found in Route53 for domain %s", authZone, fqdn) + } + + zoneId := *resp.HostedZones[0].Id + if strings.HasPrefix(zoneId, "/hostedzone/") { + zoneId = strings.TrimPrefix(zoneId, "/hostedzone/") + } + + return zoneId, nil +} + +func newTXTRecordSet(fqdn, value string, ttl int) *route53.ResourceRecordSet { + return &route53.ResourceRecordSet{ + Name: aws.String(fqdn), + Type: aws.String("TXT"), + TTL: aws.Int64(int64(ttl)), + 
ResourceRecords: []*route53.ResourceRecord{ + {Value: aws.String(value)}, + }, + } +} diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc new file mode 100644 index 0000000..730e569 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.gitcookies.sh.enc @@ -0,0 +1 @@ +'|Ę&{tÄU|gGę(ěŹCy=+¨śňcű:u:/pś#~žü["±4¤!­nŮAŞDK<Šuf˙hĹażÂ:şü¸ˇ´B/ŁŘ¤ą¤ň_hÎŰSăT*wĚxĽŻťą-ç|ťŕŔÓŃÄäóĚ㣗A$$â6ŁÁâG)8nĎpűĆˡ3ĚšśoďĎvŽB–3ż­]xÝ“Ó2l§G•|qRŢŻ ö2 5R–Ó×Ç$´ń˝YčˇŢÝ™l‘Ë«yAI"ŰŚ®íĂ»ąĽkÄ|Kĺţ[9ĆâŇĺ=°ú˙źń|@S•3 ó#ćťx?ľV„,ľ‚SĆÝőśwPíogŇ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/square/go-jose/.gitignore b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/.gitignore rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.gitignore diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.travis.yml new file mode 100644 index 0000000..7582bb7 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/.travis.yml @@ -0,0 +1,45 @@ +language: go + +sudo: false + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: +- 1.3 +- 1.4 +- 1.5 +- 1.6 +- tip + +go_import_path: gopkg.in/square/go-jose.v1 + +before_script: +- export PATH=$HOME/.local/bin:$PATH + +before_install: +# Install encrypted gitcookies to get around bandwidth-limits +# that is causing Travis-CI builds to fail. For more info, see +# https://github.com/golang/go/issues/12933 +- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true +- bash .gitcookies.sh || true +- go get github.com/wadey/gocovmerge +- go get github.com/mattn/goveralls +- go get golang.org/x/tools/cmd/cover || true +- go get code.google.com/p/go.tools/cmd/cover || true +- pip install cram --user `whoami` + +script: +- go test . -v -covermode=count -coverprofile=profile.cov +- go test . -tags std_json -v -covermode=count -coverprofile=profile-std-json.cov +- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov +- go test ./json -v # no coverage for forked encoding/json package +- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t +- cd .. 
+ +after_success: +- gocovmerge *.cov */*.cov > merged.coverprofile +- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci + diff --git a/Godeps/_workspace/src/github.com/square/go-jose/BUG-BOUNTY.md b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/BUG-BOUNTY.md rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md diff --git a/Godeps/_workspace/src/github.com/square/go-jose/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/CONTRIBUTING.md similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/CONTRIBUTING.md rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/square/go-jose/README.md b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/README.md similarity index 89% rename from Godeps/_workspace/src/github.com/square/go-jose/README.md rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/README.md index 586f11b..fd859da 100644 --- a/Godeps/_workspace/src/github.com/square/go-jose/README.md +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/README.md @@ -1,6 +1,6 @@ # Go JOSE -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/square/go-jose) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. For the moment, it mainly focuses on encryption @@ -27,6 +27,13 @@ command-line utility ([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) for dealing with JOSE messages in a shell. +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. If you do not like this behavior, you can use the +`std_json` build tag to disable it (though we do not recommend doing so). + ### Versions We use [gopkg.in](https://gopkg.in) for versioning. 
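For context on the README note above, here is a minimal sketch of what case-sensitive member matching means in practice. It assumes the forked package introduced by this change is importable at the vendored path `gopkg.in/square/go-jose.v1/json`; the `josejson` alias and the `header` struct are only for illustration.

```go
package main

import (
	"fmt"

	// Vendored fork added by this change; the import path is an assumption
	// based on how the package is laid out in this diff.
	josejson "gopkg.in/square/go-jose.v1/json"
)

type header struct {
	Alg string `json:"alg"`
}

func main() {
	var h header

	// The forked decoder matches member names case-sensitively, so "ALG"
	// does not populate the field tagged `json:"alg"`.
	if err := josejson.Unmarshal([]byte(`{"ALG":"RS256"}`), &h); err != nil {
		fmt.Println("unexpected error:", err)
	}
	fmt.Printf("mismatched case: %q\n", h.Alg) // ""

	// An exact-case key decodes as usual.
	if err := josejson.Unmarshal([]byte(`{"alg":"RS256"}`), &h); err != nil {
		fmt.Println("unexpected error:", err)
	}
	fmt.Printf("exact match: %q\n", h.Alg) // "RS256"
}
```

With the stock `encoding/json`, the first call would populate `Alg` through case-insensitive matching; per the README note, building go-jose with the `std_json` tag restores that behavior.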
diff --git a/Godeps/_workspace/src/github.com/square/go-jose/asymmetric.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/asymmetric.go similarity index 99% rename from Godeps/_workspace/src/github.com/square/go-jose/asymmetric.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/asymmetric.go index 254a8a3..381156c 100644 --- a/Godeps/_workspace/src/github.com/square/go-jose/asymmetric.go +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/asymmetric.go @@ -28,7 +28,7 @@ import ( "fmt" "math/big" - "github.com/square/go-jose/cipher" + "gopkg.in/square/go-jose.v1/cipher" ) // A generic RSA-based encrypter/verifier diff --git a/Godeps/_workspace/src/github.com/square/go-jose/cipher/cbc_hmac.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/cipher/cbc_hmac.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go diff --git a/Godeps/_workspace/src/github.com/square/go-jose/cipher/concat_kdf.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/cipher/concat_kdf.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go diff --git a/Godeps/_workspace/src/github.com/square/go-jose/cipher/ecdh_es.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/cipher/ecdh_es.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go diff --git a/Godeps/_workspace/src/github.com/square/go-jose/cipher/key_wrap.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/key_wrap.go similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/cipher/key_wrap.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/cipher/key_wrap.go diff --git a/Godeps/_workspace/src/github.com/square/go-jose/crypter.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/crypter.go similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/crypter.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/crypter.go diff --git a/Godeps/_workspace/src/github.com/square/go-jose/doc.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/square/go-jose/doc.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/doc.go diff --git a/Godeps/_workspace/src/github.com/square/go-jose/encoding.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/encoding.go similarity index 95% rename from Godeps/_workspace/src/github.com/square/go-jose/encoding.go rename to Godeps/_workspace/src/gopkg.in/square/go-jose.v1/encoding.go index 88f0284..3e2ac0a 100644 --- a/Godeps/_workspace/src/github.com/square/go-jose/encoding.go +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/encoding.go @@ -21,7 +21,6 @@ import ( "compress/flate" "encoding/base64" "encoding/binary" - "encoding/json" "io" "math/big" "regexp" @@ -46,13 +45,13 @@ func base64URLDecode(data string) ([]byte, error) { // Helper function to serialize known-good objects. // Precondition: value is not a nil pointer. func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) + out, err := MarshalJSON(value) if err != nil { panic(err) } // We never want to serialize the top-level value "null," since it's not a // valid JOSE message. 
But if a caller passes in a nil pointer to this method, - // json.Marshal will happily serialize it as the top-level value "null". If + // MarshalJSON will happily serialize it as the top-level value "null". If // that value is then embedded in another operation, for instance by being // base64-encoded and fed as input to a signing algorithm // (https://github.com/square/go-jose/issues/22), the result will be @@ -147,12 +146,12 @@ func newBufferFromInt(num uint64) *byteBuffer { } func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) + return MarshalJSON(b.base64()) } func (b *byteBuffer) UnmarshalJSON(data []byte) error { var encoded string - err := json.Unmarshal(data, &encoded) + err := UnmarshalJSON(data, &encoded) if err != nil { return err } diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/LICENSE b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/README.md b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/README.md new file mode 100644 index 0000000..86de5e5 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. 
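The second change listed in the forked package's README above, rejection of duplicate member names, surfaces as an ordinary error returned from `Unmarshal`. A small sketch, again assuming the vendored import path:

```go
package main

import (
	"fmt"

	// Same vendored fork as above; the import path is an assumption.
	josejson "gopkg.in/square/go-jose.v1/json"
)

func main() {
	var m map[string]interface{}

	// The fork rejects objects with duplicate member names instead of
	// silently keeping the last value, so Unmarshal returns an error here.
	err := josejson.Unmarshal([]byte(`{"kid":"a","kid":"b"}`), &m)
	fmt.Println(err) // json: duplicate key 'kid' in object
}
```

The standard library decoder would instead keep the last value without reporting anything, which is exactly the ambiguity the fork is meant to avoid for JOSE messages.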
diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/decode.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/decode.go new file mode 100644 index 0000000..37457e5 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/decode.go @@ -0,0 +1,1183 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. 
+// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. 
+ // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. 
+ d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. 
+ fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/encode.go b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/encode.go new file mode 100644 index 0000000..1dae8bb --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/square/go-jose.v1/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. 
The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. 
+// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML